async function interpretSpeech(speechText) {
transcriptText = "Asking OpenAI...";
transcriptDiv.html(transcriptText);
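// Build a few-shot prompt that asks the model for a single JSON object using the
// keys the sketch expects: visualType, color, speed, and count.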
const prompt = `You are an art generator assistant. When given a description, generate parameters for a visual art piece. Return the response as a JSON object with the following keys: \`visualType\` (string, one of 'waves', 'particles', 'circles', 'lines'), \`color\` (string, hex color code, e.g., '#RRGGBB'), \`speed\` (number, 0.1 to 5), \`count\` (integer, 10 to 200). Ensure the color is a valid hex code. For example, if the description is 'red flowing lines', you might return \`{\"visualType\": \"lines\", \"color\": \"#FF0000\", \"speed\": 2, \"count\": 100}\`. If the description is 'blue starry background', you might return \`{\"visualType\": \"particles\", \"color\": \"#0000FF\", \"speed\": 0.5, \"count\": 150}\`. If the description is 'slow green circles', you might return \`{\"visualType\": \"circles\", \"color\": \"#00FF00\", \"speed\": 0.2, \"count\": 80}\`. If the description is 'fast vibrant waves', you might return \`{\"visualType\": \"waves\", \"color\": \"#FF00FF\", \"speed\": 4, \"count\": 70}\`.
Now, generate parameters for: \"${speechText}\"`;
try {
const response = await fetch('https://api.openai.com/v1/chat/completions', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${openaiApiKey}`
},
body: JSON.stringify({
model: 'gpt-3.5-turbo', // Or 'gpt-4' if available and preferred
messages: [{
role: 'user',
content: prompt
}],
response_format: { type: "json_object" }, // Crucial for getting JSON output
temperature: 0.7, // Creativity level
max_tokens: 150 // Max tokens for the response
})
});
if (!response.ok) {
const errorData = await response.json().catch(() => ({})); // The error body may not be valid JSON
throw new Error(`OpenAI API error: ${response.status} - ${errorData.error?.message || 'Unknown error'}`);
}
const data = await response.json();
console.log("OpenAI raw response:", data);
const openAIResponseContent = data.choices[0].message.content;
console.log("OpenAI content:", openAIResponseContent);
// Attempt to parse the JSON string
let parsedParams;
try {
parsedParams = JSON.parse(openAIResponseContent);
} catch (e) {
console.error("Failed to parse JSON from OpenAI:", e);
transcriptText = "OpenAI returned invalid JSON. Trying again...";
transcriptDiv.html(transcriptText);
// Fallback or retry
return;
}
// Validate and sanitize parameters
visualParams.visualType = ['waves', 'particles', 'circles', 'lines'].includes(parsedParams.visualType) ? parsedParams.visualType : 'waves';
visualParams.color = /^#([0-9A-Fa-f]{3}){1,2}$/.test(parsedParams.color) ? parsedParams.color : defaultColors[floor(random(defaultColors.length))]; // Validate hex color
visualParams.speed = constrain(Number(parsedParams.speed) || 1, 0.1, 5); // Guard against a non-numeric speed (fallback of 1 is an assumed default)
visualParams.count = constrain(floor(Number(parsedParams.count) || 50), 10, 200); // Guard against a non-numeric count (fallback of 50 is an assumed default)
transcriptText = `OpenAI says: Visual Type: ${visualParams.visualType}, Color: ${visualParams.color}, Speed: ${visualParams.speed}, Count: ${visualParams.count}`;
transcriptDiv.html(transcriptText);
console.log("Updated visual parameters:", visualParams);
speakBack(visualParams); // Now calls the OpenAI TTS function
} catch (error) {
console.error("Error communicating with OpenAI:", error);
transcriptText = "Error: Could not get art parameters from OpenAI. Please check your API key and try again. " + error.message;
transcriptDiv.html(transcriptText);
micButton.removeAttribute('disabled'); // Re-enable button on error
}
}
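// --- Usage sketch (illustrative, not part of the flow above) ---
// A minimal sketch of how interpretSpeech() might be wired up, assuming `recognition`
// is a Web Speech API SpeechRecognition instance and `micButton` is the p5 button
// that starts listening (both defined elsewhere in the sketch):
//
// micButton.mousePressed(() => {
//   micButton.attribute('disabled', ''); // Prevent overlapping requests while listening
//   recognition.start();
// });
//
// recognition.onresult = (event) => {
//   const speechText = event.results[0][0].transcript;
//   transcriptText = `You said: "${speechText}"`;
//   transcriptDiv.html(transcriptText);
//   interpretSpeech(speechText); // Hand the transcript to OpenAI for art parameters
// };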