250 lines
8.4 KiB
TypeScript
import { serve } from "https://deno.land/std@0.168.0/http/server.ts";
|
|
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
|
|
import { load } from "https://deno.land/std@0.224.0/dotenv/mod.ts";
|
|
import SupabaseClient from "https://esm.sh/@supabase/supabase-js@2.76.1/dist/module/SupabaseClient.d.ts";
|
|
import { User } from "https://esm.sh/@supabase/auth-js@2.76.1/dist/module/lib/types.d.ts";
|
|
import { DOMParser } from "https://deno.land/x/deno_dom@v0.1.45/deno-dom-wasm.ts";
|
|
import OpenAI from "npm:openai@4";
|
|
|
|
// Load environment variables
|
|
await load({ export: true, envPath: ".env" });
|
|
|
|
const corsHeaders = {
|
|
'Access-Control-Allow-Origin': '*',
|
|
'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type'
|
|
};
|
|
|
|
// Initialize OpenAI client
|
|
const openai = new OpenAI({
|
|
apiKey: Deno.env.get("OPENAI_API_KEY") || "",
|
|
});
|
|
|
|
// Load the xml doc - prompts.xml
|
|
const isProduction = Deno.env.get('DENO_DEPLOYMENT_ID') !== undefined;
|
|
let xmlContent: string;
|
|
if (isProduction) {
|
|
const response = await fetch('https://git.imbenji.dev/ImBenji/Mori/raw/branch/main/supabase/functions/llm-pipeline/prompts.xml');
|
|
xmlContent = await response.text();
|
|
} else {
|
|
xmlContent = await Deno.readTextFile('./prompts.xml');
|
|
}
|
|
const prompt_xml = new DOMParser().parseFromString(xmlContent, 'text/html');
|
|
|
|
const mori_personality = prompt_xml?.querySelector('mori_personality')?.textContent || "";
|
|
|
|
async function handleRequest(req: Resquest, controller : ReadableStreamDefaultController, user : User, supabaseClient : SupabaseClient) {
|
|
|
|
function enqueueJson(data: any) {
|
|
controller.enqueue(new TextEncoder().encode(`${JSON.stringify(data)}`));
|
|
}
|
|
|
|
const showExamples = false;
|
|
if (showExamples) {
|
|
// Example 1: Send current status to the client
|
|
let status = {
|
|
command: "update_status",
|
|
status_type: "info",
|
|
message: "Discombobulating the flux capacitor...",
|
|
};
|
|
enqueueJson(status);
|
|
|
|
// Simulate some processing delay
|
|
await new Promise((resolve) => setTimeout(resolve, 2000));
|
|
|
|
// Example 2: Send progress update to the client
|
|
status = {
|
|
command: "update_status",
|
|
status_type: "progress",
|
|
message: "Halfway through the discombobulation!",
|
|
};
|
|
enqueueJson(status);
|
|
|
|
// Simulate some processing delay
|
|
await new Promise((resolve) => setTimeout(resolve, 2000));
|
|
|
|
status = {
|
|
command: "update_status",
|
|
status_type: "progress",
|
|
message: "Lost track of what we're doing...",
|
|
};
|
|
enqueueJson(status);
|
|
|
|
// Simulate some processing delay
|
|
await new Promise((resolve) => setTimeout(resolve, 2000));
|
|
}
|
|
|
|
const requestBody = await req.json();
|
|
|
|
/*
|
|
Summarise the conversation
|
|
*/
|
|
enqueueJson({
|
|
command: "update_status",
|
|
status_type: "info",
|
|
message: "Contemplating conversation...",
|
|
});
|
|
const summarySystemPrompt = prompt_xml.querySelector('conversation_summariser')?.textContent
|
|
.replaceAll("{{PERSONALITY_INJECTION}}", mori_personality);
|
|
const summaryCompletion = await openai.chat.completions.create({
|
|
model: "gpt-4.1-mini",
|
|
messages: [
|
|
{
|
|
role: "system",
|
|
content: summarySystemPrompt || "",
|
|
// Static: gets cached
|
|
},
|
|
...requestBody.messages,
|
|
{
|
|
role: "assistant",
|
|
content: `CURRENT_TIME: ${new Date().toISOString()}\n\nCONVERSATION_GIST:\n${requestBody.gist || "No existing context."}`,
|
|
// Dynamic context injection
|
|
},
|
|
],
|
|
});
|
|
let summaryJson = JSON.parse(summaryCompletion.choices[0]?.message?.content || "{}");
|
|
enqueueJson({
|
|
command: "update_gist",
|
|
content: summaryJson.context || "",
|
|
});
|
|
|
|
/*
|
|
Formulate a response plan
|
|
*/
|
|
enqueueJson({
|
|
command: "update_status",
|
|
status_type: "info",
|
|
message: "Devising response plan...",
|
|
});
|
|
const responsePlanSystemPrompt = prompt_xml.querySelector('response_planner')?.textContent
|
|
.replaceAll("{{PERSONALITY_INJECTION}}", mori_personality);
|
|
const responsePlanCompletion = await openai.chat.completions.create({
|
|
model: "gpt-4.1-mini",
|
|
messages: [
|
|
{
|
|
role: "system",
|
|
content: responsePlanSystemPrompt
|
|
// Static: gets cached
|
|
},
|
|
...requestBody.messages,
|
|
// Recent conversation messages
|
|
{
|
|
role: "assistant",
|
|
content: `CURRENT_TIME: ${new Date().toISOString()}\n\nCONVERSATION_GIST:\n${summaryJson.context || "No existing context."}`
|
|
// Dynamic context injection
|
|
},
|
|
]
|
|
});
|
|
console.log("Response Plan:", responsePlanCompletion.choices[0]?.message?.content);
|
|
let responsePlanJson = JSON.parse(responsePlanCompletion.choices[0]?.message?.content || "{}");
|
|
// enqueueJson({
|
|
// command: "append_response",
|
|
// content: responsePlanCompletion.choices[0]?.message?.content || "",
|
|
// });
|
|
|
|
/*
|
|
Generate the final response
|
|
*/
|
|
enqueueJson({
|
|
command: "update_status",
|
|
status_type: "info",
|
|
message: "",
|
|
});
|
|
const chatSystemPrompt = prompt_xml.querySelector('chat_responder')?.textContent
|
|
.replaceAll("{{PERSONALITY_INJECTION}}", mori_personality);
|
|
const chatCompletion = await openai.chat.completions.create({
|
|
model: "gpt-4.1-mini",
|
|
messages: [
|
|
{
|
|
role: "system",
|
|
content: chatSystemPrompt || "",
|
|
// Static: gets cached
|
|
},
|
|
...requestBody.messages,
|
|
{
|
|
role: "assistant",
|
|
content: `CURRENT_TIME: ${new Date().toISOString()}\n\nCONVERSATION_GIST:\n${summaryJson.context || "No existing context."}\n\nRESPONSE_PLAN:\n${responsePlanJson.plan || "No specific plan."}`
|
|
// Dynamic context injection
|
|
},
|
|
],
|
|
stream: true,
|
|
});
|
|
|
|
for await (const chunk of chatCompletion) {
|
|
if (chunk.choices[0]?.delta?.content) {
|
|
enqueueJson({
|
|
command: "append_response",
|
|
content: chunk.choices[0].delta.content,
|
|
});
|
|
}
|
|
}
|
|
|
|
}
|
|
|
|
serve(async (req) => {
|
|
// Handle CORS preflight requests
|
|
if (req.method === 'OPTIONS') {
|
|
return new Response('ok', { headers: corsHeaders });
|
|
}
|
|
|
|
// Get the token from the Authorization header
|
|
const authHeader = req.headers.get('Authorization');
|
|
const token = authHeader?.replace('Bearer ', '');
|
|
|
|
// Initialize the Supabase client
|
|
const supabaseClient = createClient(
|
|
Deno.env.get('SUPABASE_URL') || '',
|
|
Deno.env.get('SUPABASE_ANON_KEY') || '',
|
|
{
|
|
global: {
|
|
headers: { Authorization: `Bearer ${token}` },
|
|
},
|
|
}
|
|
);
|
|
|
|
// Authenticate the user
|
|
const { data: { user }, error } = await supabaseClient.auth.getUser(token);
|
|
|
|
if (error || !user) {
|
|
return new Response(
|
|
JSON.stringify({ error: 'Unauthorized' }),
|
|
{
|
|
status: 401,
|
|
headers: {
|
|
...corsHeaders,
|
|
'Content-Type': 'application/json'
|
|
}
|
|
}
|
|
);
|
|
}
|
|
|
|
// User is authenticated, handle the request
|
|
const readable = new ReadableStream({
|
|
async start(controller) {
|
|
// Wrap controller to log on enqueue
|
|
const originalEnqueue = controller.enqueue.bind(controller);
|
|
controller.enqueue = (chunk) => {
|
|
const decoded = new TextDecoder().decode(chunk);
|
|
console.log('Stream output:', decoded);
|
|
originalEnqueue(chunk);
|
|
};
|
|
|
|
try {
|
|
await handleRequest(req, controller, user, supabaseClient);
|
|
controller.close();
|
|
} catch (error) {
|
|
console.error("Error in stream:", error);
|
|
controller.error(error);
|
|
}
|
|
}
|
|
});
|
|
|
|
return new Response(readable, {
|
|
headers: {
|
|
...corsHeaders,
|
|
'Content-Type': 'text/event-stream',
|
|
'Cache-Control': 'no-cache',
|
|
'Connection': 'keep-alive'
|
|
}
|
|
});
|
|
});
|