llm.mjs 2.2 KB

import { ChatOpenAI } from "langchain/chat_models/openai"
import { CallbackManager } from "langchain/callbacks"
import log4js from "log4js"
import { ChatPromptTemplate, MessagesPlaceholder } from "langchain/prompts"
import { ConversationChain } from "langchain/chains"
import { BufferWindowMemory } from "langchain/memory"
const logger = log4js.getLogger("peper-gen")
logger.level = "debug"

// Running token totals, accumulated in handleLLMEnd below.
const usage = { completionTokens: 0, promptTokens: 0, totalTokens: 0 }
const llm = new ChatOpenAI({
  // Read the key from the environment instead of hard-coding a secret.
  openAIApiKey: process.env.OPENAI_API_KEY,
  modelName: "gpt-3.5-turbo-16k",
  timeout: 1000 * 60 * 5, // 5 minutes
  callbackManager: CallbackManager.fromHandlers({
    async handleLLMStart(llm, prompts) {
      logger.debug(`[LLM Start]LLM: ${JSON.stringify(llm)}`)
      logger.debug(`[LLM Start]Prompts: ${prompts.join("\n")}`)
    },
    async handleLLMEnd(output) {
      logger.debug(
        `[LLM End]${output.generations
          .flat()
          .map((i) => i.text)
          .join("\n")}`
      )
      logger.debug(`[LLM End]${JSON.stringify(output.llmOutput)}`)
      usage.completionTokens += output.llmOutput.tokenUsage.completionTokens
      usage.promptTokens += output.llmOutput.tokenUsage.promptTokens
      usage.totalTokens += output.llmOutput.tokenUsage.totalTokens
    },
    async handleLLMError(error) {
      logger.error(error)
    }
  }),
  onFailedAttempt(error) {
    logger.error(error)
  },
  // Uncomment to route requests through a proxy endpoint:
  // configuration: {
  //   baseURL: "https://openai.c8c.top/v1",
  // },
})
// Builds a chat chain whose memory keeps only the last k = 4 exchanges,
// so the prompt stays within the model's context window.
function conversation(system) {
  const chatPrompt = ChatPromptTemplate.fromMessages([
    ["system", system],
    new MessagesPlaceholder("history"),
    ["human", "{input}"]
  ])
  const memory = new BufferWindowMemory({
    k: 4,
    memoryKey: "history",
    returnMessages: true
  })
  const chain = new ConversationChain({
    memory,
    prompt: chatPrompt,
    llm
  })
  return { memory, chain }
}
export { llm, logger, conversation, usage }
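
A minimal usage sketch (not part of the file above), assuming OPENAI_API_KEY is set in the environment; the file name, system prompt, and inputs here are illustrative:

usage-example.mjs

// Illustrative only: shows how the conversation() helper and the shared
// usage counter are meant to be consumed.
import { conversation, usage, logger } from "./llm.mjs"

const { chain } = conversation("You are a concise technical assistant.")

// ConversationChain fills {input} in the prompt and appends each turn
// to the BufferWindowMemory under the "history" key.
const first = await chain.call({ input: "What does BufferWindowMemory do?" })
logger.info(first.response)

// The second call sees the first exchange via the memory window (k = 4).
const second = await chain.call({ input: "And why limit the window size?" })
logger.info(second.response)

// Totals accumulated by the handleLLMEnd callback across both calls.
logger.info(`Tokens used so far: ${usage.totalTokens}`)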