/ai-chat
├─ server.js
├─ package.json
└─ .env (do not commit)
{
  "name": "ai-chat-backend",
  "version": "1.0.0",
  "type": "module",
  "main": "server.js",
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "dotenv": "^16.4.5",
    "express": "^4.19.2",
    "cors": "^2.8.5"
  }
}
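Note that server.js below relies on the global fetch API, which is built into Node 18 and later. If you want npm to flag older runtimes, you can optionally add an engines field to the package.json above (a minimal sketch):

"engines": {
  "node": ">=18"
}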
OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxx
OPENAI_MODEL=gpt-4o-mini
PORT=8787
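The .env file holds the secret key, so it must stay out of version control, as flagged in the tree above. A minimal .gitignore for this project might look like:

# .gitignore
.env
node_modules/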
import 'dotenv/config';
import express from 'express';
import cors from 'cors';
const app = express();
// Basic settings
app.use(cors({
  origin: true, // Consider restricting this to your own domain
  methods: ['POST', 'OPTIONS'],
  allowedHeaders: ['Content-Type', 'Authorization']
}));
app.use(express.json({ limit: '1mb' }));
const PORT = process.env.PORT || 8787;
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const OPENAI_MODEL = process.env.OPENAI_MODEL || 'gpt-4o-mini';
if (!OPENAI_API_KEY) {
  console.error('❌ OPENAI_API_KEY missing in .env');
  process.exit(1);
}
// Endpoint matching the widget's ENDPOINT_URL: /api/ai-chat
app.post('/api/ai-chat', async (req, res) => {
  try {
    const { message, system = "Rispondi in italiano, tono professionale.", history = [] } = req.body || {};
    if (!message) {
      return res.status(400).json({ error: "Missing 'message' parameter" });
    }

    // Build a compact textual prompt (simple and robust)
    const transcript = history
      .slice(-20)
      .map(m => `${m.role === 'user' ? 'Utente' : 'Assistente'}: ${m.content}`)
      .join('\n');

    const prompt = `${system}

Contesto:
${transcript ? transcript + '\n' : ''}Utente: ${message}
Assistente:`;
    // Streaming request to the Responses API
    const upstream = await fetch('https://api.openai.com/v1/responses', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${OPENAI_API_KEY}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        model: OPENAI_MODEL,
        input: prompt,
        stream: true
      })
    });

    if (!upstream.ok || !upstream.body) {
      const txt = await upstream.text().catch(() => '');
      return res.status(502).json({ error: 'Upstream error', detail: txt });
    }

    // Stream the response back as chunked plain text.
    // (Node applies chunked transfer encoding automatically when no
    // Content-Length is set; there is no need to set Transfer-Encoding by hand.)
    res.setHeader('Content-Type', 'text/plain; charset=utf-8');
    res.setHeader('Cache-Control', 'no-store');

    const reader = upstream.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    // Minimal parsing of the Responses API SSE events
    while (true) {
      const { value, done } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });

      let idx;
      while ((idx = buffer.indexOf('\n')) !== -1) {
        const line = buffer.slice(0, idx).trimEnd();
        buffer = buffer.slice(idx + 1);
        if (!line) continue;

        if (line.startsWith('data:')) {
          const data = line.slice(5).trim();
          if (data === '[DONE]') break;
          try {
            const evt = JSON.parse(data);
            // Incremental text deltas
            if (evt.type === 'response.output_text.delta' && typeof evt.delta === 'string') {
              res.write(evt.delta);
            }
            // 'response.completed' / 'response.output_text.done' mark the end of the
            // text output; the upstream stream then closes and the reader reports done.
          } catch {
            // Ignore non-JSON (partial) fragments
          }
        }
      }
    }

    res.end();
  } catch (err) {
    console.error(err);
    // If streaming has already begun, headers are sent: just end the response
    if (res.headersSent) return res.end();
    res.status(500).json({ error: 'Internal server error' });
  }
});
// Non-streaming variant (optional): returns JSON { reply }
// Useful if you would rather not handle streams in the front-end
app.post('/api/ai-chat-json', async (req, res) => {
  try {
    const { message, system = "Rispondi in italiano, tono professionale.", history = [] } = req.body || {};
    if (!message) return res.status(400).json({ error: "Missing 'message' parameter" });

    const transcript = history.slice(-20).map(m => `${m.role === 'user' ? 'Utente' : 'Assistente'}: ${m.content}`).join('\n');
    const prompt = `${system}\n\nContesto:\n${transcript ? transcript + '\n' : ''}Utente: ${message}\nAssistente:`;
    const upstream = await fetch('https://api.openai.com/v1/responses', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${OPENAI_API_KEY}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        model: OPENAI_MODEL,
        input: prompt,
        stream: false
      })
    });

    if (!upstream.ok) {
      const txt = await upstream.text().catch(() => '');
      return res.status(502).json({ error: 'Upstream error', detail: txt });
    }

    const data = await upstream.json();
    // Note: the raw REST response has no top-level `output_text` field (that is an
    // SDK convenience property); collect the text parts from the `output` array instead.
    const reply = (data.output ?? [])
      .flatMap(item => item.content ?? [])
      .filter(part => part.type === 'output_text')
      .map(part => part.text)
      .join('') || 'Nessuna risposta.';
    res.json({ reply });
  } catch (e) {
    console.error(e);
    res.status(500).json({ error: 'Internal server error' });
  }
});
app.listen(PORT, () => {
  console.log(`✅ AI chat server listening at http://localhost:${PORT}`);
});
npm install
npm start
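With the server running, you can smoke-test both endpoints from a terminal; the -N flag stops curl from buffering, so the streamed chunks print as they arrive (the request body here is just a placeholder):

curl -N -X POST http://localhost:8787/api/ai-chat \
  -H 'Content-Type: application/json' \
  -d '{"message":"Ciao!"}'

curl -X POST http://localhost:8787/api/ai-chat-json \
  -H 'Content-Type: application/json' \
  -d '{"message":"Ciao!"}'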
const ENDPOINT_URL = "http://localhost:8787/api/ai-chat"; // oppure /api/ai-chat se proxy dietro Nginx/Apache
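On the widget side, the streamed reply can be consumed with a plain fetch and a stream reader. A minimal client-side sketch follows; the sendMessage helper and onChunk callback are illustrative names, not part of the widget code:

async function sendMessage(message, onChunk) {
  const res = await fetch(ENDPOINT_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message })
  });
  if (!res.ok || !res.body) throw new Error(`HTTP ${res.status}`);
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    onChunk(decoder.decode(value, { stream: true })); // e.g. append each chunk to the chat UI
  }
}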
// filename: worker.js
export default {
  async fetch(request, env) {
    const cors = {
      "Access-Control-Allow-Origin": request.headers.get("Origin") || "*",
      "Access-Control-Allow-Methods": "POST, OPTIONS",
      "Access-Control-Allow-Headers": "Content-Type, Authorization",
    };
    // CORS preflight
    if (request.method === "OPTIONS") {
      return new Response(null, { headers: cors });
    }
    if (request.method !== "POST") {
      return new Response("Method Not Allowed", { status: 405, headers: cors });
    }

    let payload;
    try {
      payload = await request.json();
    } catch {
      return new Response("Bad Request", { status: 400, headers: cors });
    }

    const { message, system = "Rispondi in italiano, tono professionale.", history = [] } = payload || {};
    if (!message) {
      return new Response("Missing 'message'", { status: 400, headers: cors });
    }

    const transcript = history.slice(-20)
      .map(m => `${m.role === "user" ? "Utente" : "Assistente"}: ${m.content}`)
      .join("\n");

    const prompt = `${system}

Contesto:
${transcript ? transcript + "\n" : ""}Utente: ${message}
Assistente:`;
const upstream = await fetch("https://api.openai.com/v1/responses", {
method: "POST",
headers: {
"Authorization": `Bearer ${env.OPENAI_API_KEY}`,
"Content-Type": "application/json",
},
body: JSON.stringify({
model: env.OPENAI_MODEL || "gpt-4o-mini",
input: prompt,
stream: true
})
});
if (!upstream.ok || !upstream.body) {
const txt = await upstream.text().catch(() => "");
return new Response(`Upstream error: ${txt}`, { status: 502, headers: cors });
}
    // Convert the SSE events into plain text chunks
    const { readable, writable } = new TransformStream();

    (async () => {
      const writer = writable.getWriter();
      const reader = upstream.body.getReader();
      const decoder = new TextDecoder();
      const encoder = new TextEncoder();
      let buffer = "";
      try {
        while (true) {
          const { value, done } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });

          let idx;
          while ((idx = buffer.indexOf("\n")) !== -1) {
            const line = buffer.slice(0, idx).trimEnd();
            buffer = buffer.slice(idx + 1);
            if (!line) continue;

            if (line.startsWith("data:")) {
              const data = line.slice(5).trim();
              if (data === "[DONE]") break;
              try {
                const evt = JSON.parse(data);
                if (evt.type === "response.output_text.delta" && typeof evt.delta === "string") {
                  await writer.write(encoder.encode(evt.delta));
                }
              } catch {
                // Ignore non-JSON (partial) fragments
              }
            }
          }
        }
      } finally {
        try { await writer.close(); } catch {}
      }
    })();
    return new Response(readable, {
      status: 200,
      headers: {
        ...cors,
        "Content-Type": "text/plain; charset=utf-8",
        "Cache-Control": "no-store"
      }
    });
  }
};
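To publish the Worker, OPENAI_API_KEY should be stored as a secret and OPENAI_MODEL as a plain variable. A minimal wrangler.toml sketch (the name and compatibility_date values are placeholders to adapt):

# wrangler.toml
name = "ai-chat"
main = "worker.js"
compatibility_date = "2024-05-01"

[vars]
OPENAI_MODEL = "gpt-4o-mini"

Then set the secret and deploy:

npx wrangler secret put OPENAI_API_KEY
npx wrangler deploy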
const ENDPOINT_URL = "https://IL-SUO-NOME.worker.dev/ai-chat"; // o la route che ha impostato