| 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485 |
- import { ChatOpenAI } from 'langchain/chat_models/openai'
- import { CallbackManager } from 'langchain/callbacks'
- import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
- import dedent from 'dedent'
- import { ConversationChain } from 'langchain/chains'
- import { BufferMemory, BufferWindowMemory } from 'langchain/memory'
- import { Logger } from '@nestjs/common'
- import { writeFile, mkdirSync, appendFileSync } from 'fs'
- import path = require('path')
- require('dotenv').config()
/**
 * Create a shared ChatOpenAI client with logging/token-accounting callbacks.
 *
 * @returns {{ llm: import('langchain/chat_models/openai').ChatOpenAI,
 *             usage: { completionTokens: number, promptTokens: number, totalTokens: number },
 *             conversation: (system: string, k?: number) => { memory: object, chain: object } }}
 *   - `llm`: the configured model client (proxied via a custom baseURL)
 *   - `usage`: mutable accumulator, updated after every completed LLM call
 *   - `conversation`: factory for a ConversationChain with a sliding-window memory
 */
export function createLLM() {
  // Running token totals across every call made through this llm instance.
  const usage = { completionTokens: 0, promptTokens: 0, totalTokens: 0 }

  const llm = new ChatOpenAI({
    // SECURITY: the key comes from the environment only — a hard-coded key
    // that previously lived in a comment here has been removed; rotate it.
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: 'gpt-3.5-turbo-1106',
    timeout: 1000 * 60 * 5, // 5 minutes
    configuration: {
      baseURL: 'https://openai.c8c.top/v1'
    },
    maxRetries: 4,
    callbackManager: CallbackManager.fromHandlers({
      // Log every outgoing prompt to the Nest logger and append it to llm.log.
      async handleLLMStart(llm, prompts) {
        Logger.log(`[LLM Start]LLM: ${JSON.stringify(llm)}`)
        Logger.log(`[LLM Start]Prompts: ${prompts.join('\n')}`)
        const logFile = path.join(__dirname, 'llm.log')
        appendFileSync(logFile, '\n--------------------------------------\n')
        appendFileSync(logFile, prompts.join('\n'))
      },
      // Log the generated text, accumulate token usage, append output to llm.log.
      async handleLLMEnd(output) {
        // generations is an array of arrays of Generation objects.
        const text = output.generations
          .flat()
          .map((g) => g.text)
          .join('\n')
        Logger.log(`[LLM End]${text}`)
        Logger.log(`[LLM End]${JSON.stringify(output.llmOutput)}`)
        // tokenUsage can be missing (e.g. streaming responses) — guard so a
        // bookkeeping gap never throws inside the callback.
        const tokenUsage = output.llmOutput?.tokenUsage
        usage.completionTokens += tokenUsage?.completionTokens ?? 0
        usage.promptTokens += tokenUsage?.promptTokens ?? 0
        usage.totalTokens += tokenUsage?.totalTokens ?? 0
        const logFile = path.join(__dirname, 'llm.log')
        appendFileSync(logFile, '\n--------------------------------------\n')
        appendFileSync(logFile, text)
      },
      async handleLLMError(error) {
        Logger.error(error)
      }
    }),
    // Surface each failed/retried API attempt with its stack trace.
    onFailedAttempt(error) {
      Logger.error(error.message, error.stack, 'OpenAI API Error')
    }
  })

  /**
   * Build a ConversationChain bound to this llm with a sliding-window memory.
   *
   * @param {string} system - system prompt template text
   * @param {number} [k=4] - number of prior exchanges kept in the window
   * @returns {{ memory: object, chain: object }} the memory (exposed so callers
   *   can inspect/clear history) and the ready-to-run chain
   */
  function conversation(system, k = 4) {
    const chatPrompt = ChatPromptTemplate.fromMessages([
      ['system', system],
      new MessagesPlaceholder('history'),
      ['human', '{input}']
    ])
    const memory = new BufferWindowMemory({
      k,
      memoryKey: 'history',
      returnMessages: true
    })
    const chain = new ConversationChain({
      memory: memory,
      prompt: chatPrompt,
      llm: llm
    })
    return { memory, chain }
  }

  return { llm, usage, conversation }
}
|