llm.ts

import { ChatOpenAI } from 'langchain/chat_models/openai'
import { CallbackManager } from 'langchain/callbacks'
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
import { ConversationChain } from 'langchain/chains'
import { BufferWindowMemory } from 'langchain/memory'
import { Logger } from '@nestjs/common'
import { appendFileSync } from 'fs'
import * as path from 'path'
import 'dotenv/config'
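
/**
 * Creates a shared ChatOpenAI client wired with logging callbacks and a
 * cumulative token-usage counter, and exposes a `conversation` helper that
 * builds windowed-memory conversation chains on top of it.
 */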
export function createLLM() {
  // Running token-usage totals, updated by the handleLLMEnd callback below
  const usage = { completionTokens: 0, promptTokens: 0, totalTokens: 0 }

  const llm = new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    modelName: 'gpt-3.5-turbo-1106',
    timeout: 1000 * 60 * 5, // 5-minute request timeout
    configuration: {
      baseURL: 'https://openai.c8c.top/v1'
    },
    maxRetries: 4,
    callbackManager: CallbackManager.fromHandlers({
      async handleLLMStart(llm, prompts) {
        Logger.log(`[LLM Start]LLM: ${JSON.stringify(llm)}`)
        Logger.log(`[LLM Start]Prompts: ${prompts.join('\n')}`)
        // Append every outgoing prompt to a local log file for later inspection
        const logFile = path.join(__dirname, 'llm.log')
        appendFileSync(logFile, '\n--------------------------------------\n')
        appendFileSync(logFile, prompts.join('\n'))
      },
      async handleLLMEnd(output) {
        const texts = output.generations
          .flat()
          .map((generation) => generation.text)
          .join('\n')
        Logger.log(`[LLM End]${texts}`)
        Logger.log(`[LLM End]${JSON.stringify(output.llmOutput)}`)
        // tokenUsage may be absent from llmOutput, so guard before accumulating
        const tokenUsage = output.llmOutput?.tokenUsage
        if (tokenUsage) {
          usage.completionTokens += tokenUsage.completionTokens
          usage.promptTokens += tokenUsage.promptTokens
          usage.totalTokens += tokenUsage.totalTokens
        }
        const logFile = path.join(__dirname, 'llm.log')
        appendFileSync(logFile, '\n--------------------------------------\n')
        appendFileSync(logFile, texts)
      },
      async handleLLMError(error) {
        Logger.error(error)
      }
    }),
    // Log each failed attempt made by the built-in retry logic
    onFailedAttempt(error) {
      Logger.error(error.message, error.stack, 'OpenAI API Error')
    }
  })
  /**
   * Builds a ConversationChain that remembers only the most recent turns.
   * @param system system prompt for the conversation
   * @param k number of past exchanges to keep in the memory window
   */
  function conversation(system: string, k = 4) {
    const chatPrompt = ChatPromptTemplate.fromMessages([
      ['system', system],
      new MessagesPlaceholder('history'),
      ['human', '{input}']
    ])
    // Sliding-window memory: only the last k turns are re-sent to the model
    const memory = new BufferWindowMemory({
      k,
      memoryKey: 'history',
      returnMessages: true
    })
    const chain = new ConversationChain({
      memory,
      prompt: chatPrompt,
      llm
    })
    return { memory, chain }
  }

  return { llm, usage, conversation }
}
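
// A minimal usage sketch (not part of the original file), kept as comments so
// the module stays side-effect free. It assumes OPENAI_API_KEY is set in .env;
// the `main` wrapper and the system prompt text are illustrative only.
//
// import { createLLM } from './llm'
//
// async function main() {
//   const { usage, conversation } = createLLM()
//   const { chain } = conversation('You are a helpful assistant.', 4)
//
//   const first = await chain.call({ input: 'Hello, who are you?' })
//   console.log(first.response)
//
//   // The window memory carries the previous turn into this call
//   const second = await chain.call({ input: 'What did I just ask you?' })
//   console.log(second.response)
//
//   console.log('Token usage so far:', usage)
// }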