import { Prompt } from "@models/Prompt.model";
import { getLogger } from "@utils/asyncLocalStorage";
import yamlModule from "js-yaml";
import { AssistantInvestigationFormat } from "../../schemas/assistant.validation";
/**
 * Generates an investigation without steps and objects based on the provided metadata.
 * Retries the LLM call up to three times until all required fields are present.
 * @category Services
 * @param {AIProcessor} aiProcessor - AI processor instance.
 * @param {string} message - The user's message from which the lesson must be detected.
 * @param {string} history - The chat history between the user and the assistant.
 * @param {string} investigationMetadata - The metadata of the investigation.
 * @returns {Promise<IAssistantInvestigationFormat | null>} - The generated investigation without steps and objects.
 * @throws {Error} - If the prompt is missing or no valid response is produced after all retries.
 */
export async function generateInvestigationWithoutStepsAndObjects(aiProcessor, message, history, investigationMetadata) {
    const logger = getLogger();
    try {
        logger.info("Generating investigation without steps and objects.");
        const prompt = await Prompt.findOne({
            name: "generate_investigation_without_steps_and_objects",
        });
        if (!prompt) {
            throw new Error("generate_investigation_without_steps_and_objects prompt not found.");
        }
        const promptTemplate = prompt.template
            .replace("{guide}", investigationMetadata)
            .replace("{history}", history || "-");
        // Every one of these fields must be non-nullish for the response to be accepted.
        const requiredFields = [
            "title",
            "curriculum",
            "unitNumberAndTitle",
            "grade",
            "lessonNumberAndTitle",
            "objectives",
            "ngss",
            "analyticalFacts",
            "goals",
            "day",
        ];
        const maxRetries = 3;
        let retry = 0;
        let response = null;
        while (retry < maxRetries) {
            response = await aiProcessor.fetchLLMResponse(promptTemplate, AssistantInvestigationFormat);
            // `== null` intentionally matches both null and undefined.
            if (requiredFields.some((field) => response[field] == null)) {
                retry += 1;
            }
            else {
                break;
            }
        }
        if (retry === maxRetries || !response) {
            throw new Error(`Failed to generate investigation without steps and objects from message: ${message}. Response is: ${JSON.stringify(response)}.`);
        }
        logger.info(`Response for generating investigation without steps and objects is: ${JSON.stringify(response)}`);
        return response;
    }
    catch (error) {
        logger.error({
            message: `Failed to generate investigation without steps and objects for message: ${message}.`,
            error: error instanceof Error ? error.message : "Unknown error",
        });
        throw error;
    }
}
/**
 * Generates objects of investigation based on the provided metadata and plain investigation.
 * Retries the LLM call up to three times until the response contains objects.
 * @category Services
 * @param {AIProcessor} aiProcessor - AI processor instance.
 * @param {string} message - The user's message from which the lesson must be detected.
 * @param {string} history - The chat history between the user and the assistant.
 * @param {string} investigationMetadata - The metadata of the investigation.
 * @param {string} investigation - The plain investigation without steps and objects with yaml format.
 * @returns {Promise<IAssistantInvestigationFormat | null>} - The generated objects of the provided investigation.
 * @throws {Error} - If the objects are not found.
 */
export async function generateInvestigationObjects(aiProcessor, message, history, investigationMetadata, investigation) {
    const logger = getLogger();
    try {
        logger.info("Generating objects of the provided investigation.");
        const prompt = await Prompt.findOne({
            name: "generate_investigation_objects",
        });
        if (!prompt) {
            throw new Error("generate_investigation_objects prompt not found.");
        }
        const filledTemplate = prompt.template
            .replace("{guide}", investigationMetadata)
            .replace("{history}", history || "-")
            .replace("{investigation}", investigation || "-");
        let response = null;
        let attempt = 0;
        // Stop as soon as a response carries a non-nullish `objects` field.
        for (; attempt < 3; attempt += 1) {
            response = await aiProcessor.fetchLLMResponse(filledTemplate, AssistantInvestigationFormat);
            if (response.objects !== undefined && response.objects !== null) {
                break;
            }
        }
        if (attempt === 3 || !response) {
            throw new Error(`Failed to generate objects of the provided investigation from message: ${message}. Response is: ${JSON.stringify(response)}.`);
        }
        logger.info(`Response for generating objects of the provided investigation is: ${JSON.stringify(response)}`);
        return response;
    }
    catch (error) {
        logger.error({
            message: `Failed to generate objects of the provided investigation for message: ${message}.`,
            error: error instanceof Error ? error.message : "Unknown error",
        });
        throw error;
    }
}
/**
 * Generates steps of investigation based on the provided metadata and the provided investigation.
 * Retries the LLM call up to three times until the response contains steps.
 * @category Services
 * @param {AIProcessor} aiProcessor - AI processor instance.
 * @param {string} message - The user's message from which the lesson must be detected.
 * @param {string} history - The chat history between the user and the assistant.
 * @param {string} investigationMetadata - The metadata of the investigation.
 * @param {string} investigation - The provided investigation without steps with yaml format.
 * @returns {Promise<IAssistantInvestigationFormat | null>} - The generated steps of the provided investigation.
 * @throws {Error} - If the steps are not found.
 */
export async function generateInvestigationSteps(aiProcessor, message, history, investigationMetadata, investigation) {
    const logger = getLogger();
    try {
        logger.info("Generating steps of the provided investigation.");
        const prompt = await Prompt.findOne({
            name: "generate_investigation_steps",
        });
        if (!prompt) {
            throw new Error("generate_investigation_steps prompt not found.");
        }
        const filledTemplate = prompt.template
            .replace("{guide}", investigationMetadata)
            .replace("{history}", history || "-")
            .replace("{investigation}", investigation || "-");
        let failedAttempts = 0;
        let response = null;
        let succeeded = false;
        while (!succeeded && failedAttempts < 3) {
            response = await aiProcessor.fetchLLMResponse(filledTemplate, AssistantInvestigationFormat);
            // A usable response must carry a non-nullish `steps` field.
            if (response.steps === undefined || response.steps === null) {
                failedAttempts += 1;
            }
            else {
                succeeded = true;
            }
        }
        if (failedAttempts === 3 || !response) {
            throw new Error(`Failed to generate steps of the provided investigation from message: ${message}. Response is: ${JSON.stringify(response)}.`);
        }
        logger.info(`Response for generating steps of the provided investigation is: ${JSON.stringify(response)}`);
        return response;
    }
    catch (error) {
        logger.error({
            message: `Failed to generate steps of the provided investigation for message: ${message}.`,
            error: error instanceof Error ? error.message : "Unknown error",
        });
        throw error;
    }
}
/**
 * Generates an entire investigation based on the provided metadata, in three
 * LLM passes: base investigation, then objects, then steps.
 * @category Services
 * @param {AIProcessor} aiProcessor - AI processor instance.
 * @param {string} message - The user's message from which the lesson must be detected.
 * @param {FormattedHistory} history - The chat history between the user and the assistant.
 * @param {IMetadataDocument} investigationMetadata - The metadata of the investigation.
 * @returns {Promise<IAssistantInvestigationFormat | null>} - The generated investigation.
 * @throws {Error} - If any generation pass fails.
 */
export async function generateInvestigation(aiProcessor, message, history, investigationMetadata) {
    const logger = getLogger();
    try {
        logger.info("Generating investigation.");
        const chatHistory = history ? [...history] : [];
        chatHistory.push({ role: "user", content: message });
        // Serialize the history once; it is identical for all three passes.
        const chatHistoryYaml = yamlModule.dump(chatHistory);
        // Drop Mongo-internal fields (_id, __v) before serializing the metadata.
        const investigationMetadataObject = investigationMetadata.toObject({
            versionKey: false,
            transform: (doc, ret) => {
                delete ret._id;
                return ret;
            },
        });
        const investigationMetadataYaml = yamlModule.dump(investigationMetadataObject);
        const investigation = await generateInvestigationWithoutStepsAndObjects(aiProcessor, message, chatHistoryYaml, investigationMetadataYaml);
        const objects = await generateInvestigationObjects(aiProcessor, message, chatHistoryYaml, investigationMetadataYaml, yamlModule.dump(investigation));
        investigation.objects = objects?.objects;
        // Re-dump deliberately: the steps prompt must see the objects added above.
        const steps = await generateInvestigationSteps(aiProcessor, message, chatHistoryYaml, investigationMetadataYaml, yamlModule.dump(investigation));
        investigation.steps = steps?.steps;
        logger.info(`Response for generating investigation is: ${JSON.stringify(investigation)}`);
        return investigation;
    }
    catch (error) {
        logger.error({
            message: `Failed to generate investigation for message: ${message}.`,
            error: error instanceof Error ? error.message : "Unknown error",
        });
        throw error;
    }
}