import { ChatOpenAI } from "langchain/chat_models/openai"
import { CallbackManager } from "langchain/callbacks"
import log4js from "log4js"
import { ChatPromptTemplate, MessagesPlaceholder } from "langchain/prompts"
import { ConversationChain } from "langchain/chains"
import { BufferWindowMemory } from "langchain/memory"

const logger = log4js.getLogger("peper-gen")
logger.level = "debug"

// Running totals of token usage, accumulated across all LLM calls.
const usage = { completionTokens: 0, promptTokens: 0, totalTokens: 0 }

const llm = new ChatOpenAI({
  // Read the key from the environment rather than hard-coding a secret.
  openAIApiKey: process.env.OPENAI_API_KEY,
  modelName: "gpt-3.5-turbo-16k",
  timeout: 1000 * 60 * 5, // 5 minutes
  callbackManager: CallbackManager.fromHandlers({
    async handleLLMStart(llm, prompts) {
      logger.debug(`[LLM Start]LLM: ${JSON.stringify(llm)}`)
      logger.debug(`[LLM Start]Prompts: ${prompts.join("\n")}`)
    },
    async handleLLMEnd(output) {
      // Flatten the per-prompt generations and log every completion text.
      logger.debug(
        `[LLM End]${output.generations
          .reduce((acc, cur) => acc.concat(cur), [])
          .map((i) => i.text)
          .join("\n")}`
      )
      logger.debug(`[LLM End]${JSON.stringify(output.llmOutput)}`)
      usage.completionTokens += output.llmOutput.tokenUsage.completionTokens
      usage.promptTokens += output.llmOutput.tokenUsage.promptTokens
      usage.totalTokens += output.llmOutput.tokenUsage.totalTokens
    },
    async handleLLMError(error) {
      logger.error(error)
    }
  }),
  onFailedAttempt(error) {
    logger.error(error)
  }
  // configuration: {
  //   baseURL: "https://openai.c8c.top/v1",
  // },
})

// Build a ConversationChain that keeps a sliding window of the last
// k = 4 exchanges in memory and injects them into the prompt's
// "history" placeholder on each call.
function conversation(system) {
  const chatPrompt = ChatPromptTemplate.fromMessages([
    ["system", system],
    new MessagesPlaceholder("history"),
    ["human", "{input}"]
  ])
  const memory = new BufferWindowMemory({
    k: 4,
    memoryKey: "history",
    returnMessages: true
  })
  const chain = new ConversationChain({ memory, prompt: chatPrompt, llm })
  return { memory, chain }
}

export { llm, logger, conversation, usage }
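
// Usage sketch (illustrative only; the system prompt and input strings
// below are hypothetical, not taken from this module):
//
//   const { chain } = conversation("You are a concise research assistant.")
//   const res = await chain.call({ input: "Summarize the abstract." })
//   logger.debug(res.response)
//   logger.debug(`Cumulative token usage: ${JSON.stringify(usage)}`)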