xiongzhu 2 years ago
parent
commit
c24400c736

+ 226 - 283
graph.json

@@ -14,7 +14,7 @@
         "transient": false,
         "exported": false,
         "token": "JwtModule",
-        "initTime": 1.3889579996466637
+        "initTime": 4.236540999263525
       }
     },
     "203550704": {
@@ -57,7 +57,7 @@
         "transient": false,
         "exported": false,
         "token": "InternalCoreModule",
-        "initTime": 2.810791999101639
+        "initTime": 5.741374999284744
       }
     },
     "233936880": {
@@ -118,8 +118,7 @@
         "static": true,
         "transient": false,
         "exported": false,
-        "token": "SandboxedCodeExecutor",
-        "initTime": 3.408957999199629
+        "token": "SandboxedCodeExecutor"
       }
     },
     "385041166": {
@@ -134,8 +133,7 @@
         "static": true,
         "transient": false,
         "exported": false,
-        "token": "JwtStrategy",
-        "initTime": 2.633415997028351
+        "token": "JwtStrategy"
       }
     },
     "400681591": {
@@ -150,8 +148,7 @@
         "static": true,
         "transient": false,
         "exported": true,
-        "token": "DiscoveryService",
-        "initTime": 2.79695900157094
+        "token": "DiscoveryService"
       }
     },
     "457665600": {
@@ -168,7 +165,7 @@
         "transient": false,
         "exported": false,
         "token": "AliyunModule",
-        "initTime": 1.7141249999403954
+        "initTime": 4.577375002205372
       }
     },
     "545846241": {
@@ -255,7 +252,7 @@
         "transient": false,
         "exported": true,
         "token": "Reflector",
-        "initTime": 2.732208002358675
+        "initTime": 5.6712500005960464
       }
     },
     "687352615": {
@@ -380,8 +377,7 @@
         "static": true,
         "transient": false,
         "exported": true,
-        "token": "Symbol(CONFIG_SERVICE)",
-        "initTime": 2.812959000468254
+        "token": "Symbol(CONFIG_SERVICE)"
       }
     },
     "1014874916": {
@@ -413,7 +409,7 @@
         "transient": false,
         "exported": true,
         "token": "CONFIGURATION_TOKEN",
-        "initTime": 1.9665419980883598
+        "initTime": 4.8645419999957085
       }
     },
     "1070370213": {
@@ -506,8 +502,7 @@
         "static": true,
         "transient": false,
         "exported": false,
-        "token": "CONFIGURATION_LOADER",
-        "initTime": 2.864833001047373
+        "token": "CONFIGURATION_LOADER"
       }
     },
     "1333473084": {
@@ -522,8 +517,7 @@
         "static": true,
         "transient": false,
         "exported": false,
-        "token": "CONFIGURATION_LOADER",
-        "initTime": 2.582125000655651
+        "token": "CONFIGURATION_LOADER"
       }
     },
     "1349506255": {
@@ -556,7 +550,7 @@
         "transient": false,
         "exported": false,
         "token": "FileModule",
-        "initTime": 1.3686669990420341
+        "initTime": 4.214375000447035
       }
     },
     "1399306624": {
@@ -573,7 +567,7 @@
         "transient": false,
         "exported": false,
         "token": "ThrottlerModule",
-        "initTime": 1.9613750018179417
+        "initTime": 4.858208000659943
       }
     },
     "1404752088": {
@@ -604,8 +598,7 @@
         "scope": 0,
         "transient": false,
         "exported": true,
-        "token": "Reflector",
-        "initTime": 3.626333002001047
+        "token": "Reflector"
       }
     },
     "1475573941": {
@@ -682,6 +675,22 @@
         "token": "ModuleRef"
       }
     },
+    "1579130544": {
+      "id": "1579130544",
+      "label": "ApplicationConfig",
+      "parent": "-1918983541",
+      "metadata": {
+        "type": "provider",
+        "internal": true,
+        "sourceModuleName": "OpenaiModule",
+        "durable": false,
+        "static": true,
+        "scope": 0,
+        "transient": false,
+        "exported": false,
+        "token": "ApplicationConfig"
+      }
+    },
     "1609867136": {
       "id": "1609867136",
       "label": "FileController",
@@ -694,7 +703,14 @@
         "static": true,
         "transient": false,
         "exported": false,
-        "token": "FileController"
+        "token": "FileController",
+        "enhancers": [
+          {
+            "id": "-1624918028",
+            "methodKey": "uploadFile",
+            "subtype": "interceptor"
+          }
+        ]
       }
     },
     "1623724461": {
@@ -756,8 +772,7 @@
         "static": true,
         "transient": false,
         "exported": true,
-        "token": "AliyunService",
-        "initTime": 2.923583000898361
+        "token": "AliyunService"
       }
     },
     "1712628187": {
@@ -800,7 +815,7 @@
         "transient": false,
         "exported": false,
         "token": "ConfigModule",
-        "initTime": 2.0757080018520355
+        "initTime": 4.98641600087285
       }
     },
     "1787828844": {
@@ -817,23 +832,7 @@
         "transient": false,
         "exported": false,
         "token": "ConfigHostModule",
-        "initTime": 1.9723340012133121
-      }
-    },
-    "1811342262": {
-      "id": "1811342262",
-      "label": "CONFIGURABLE_MODULE_OPTIONS[7adf7a3c9416a5fd2f4c0]",
-      "parent": "-1033621697",
-      "metadata": {
-        "type": "provider",
-        "internal": false,
-        "sourceModuleName": "DevtoolsModule",
-        "durable": false,
-        "static": true,
-        "scope": 0,
-        "transient": false,
-        "exported": false,
-        "token": "CONFIGURABLE_MODULE_OPTIONS[7adf7a3c9416a5fd2f4c0]"
+        "initTime": 4.869625002145767
       }
     },
     "1817334964": {
@@ -896,7 +895,7 @@
         "transient": false,
         "exported": true,
         "token": "REQUEST",
-        "initTime": 2.215082999318838
+        "initTime": 2.5646249987185
       }
     },
     "1989588516": {
@@ -992,7 +991,7 @@
         "transient": true,
         "exported": true,
         "token": "INQUIRER",
-        "initTime": 2.2113750018179417
+        "initTime": 2.561083000153303
       }
     },
     "-26938366": {
@@ -1041,7 +1040,7 @@
         "transient": false,
         "exported": true,
         "token": "HttpAdapterHost",
-        "initTime": 2.9109590016305447
+        "initTime": 5.931333001703024
       }
     },
     "-702581189": {
@@ -1057,7 +1056,7 @@
         "transient": false,
         "exported": true,
         "token": "LazyModuleLoader",
-        "initTime": 2.4618749991059303
+        "initTime": 5.39979100227356
       }
     },
     "-1904419534": {
@@ -1100,7 +1099,7 @@
         "transient": false,
         "exported": false,
         "token": "AppModule",
-        "initTime": 2.4542919993400574
+        "initTime": 5.394708998501301
       }
     },
     "-71302842": {
@@ -1148,7 +1147,7 @@
         "transient": false,
         "exported": false,
         "token": "AppService",
-        "initTime": 2.362790998071432
+        "initTime": 5.267290998250246
       }
     },
     "-1033621697": {
@@ -1175,7 +1174,7 @@
         "transient": false,
         "exported": false,
         "token": "DevtoolsModule",
-        "initTime": 2.3459170013666153
+        "initTime": 5.249499998986721
       }
     },
     "-751962311": {
@@ -1194,6 +1193,22 @@
         "token": "ModuleRef"
       }
     },
+    "-190334293": {
+      "id": "-190334293",
+      "label": "CONFIGURABLE_MODULE_OPTIONS[9e579419cfe20f56e38d2]",
+      "parent": "-1033621697",
+      "metadata": {
+        "type": "provider",
+        "internal": false,
+        "sourceModuleName": "DevtoolsModule",
+        "durable": false,
+        "static": true,
+        "scope": 0,
+        "transient": false,
+        "exported": false,
+        "token": "CONFIGURABLE_MODULE_OPTIONS[9e579419cfe20f56e38d2]"
+      }
+    },
     "-643397745": {
       "id": "-643397745",
       "label": "DiscoveryModule",
@@ -1218,7 +1233,7 @@
         "transient": false,
         "exported": false,
         "token": "DiscoveryModule",
-        "initTime": 2.1185000017285347
+        "initTime": 4.9926670007407665
       }
     },
     "-501891371": {
@@ -1266,7 +1281,7 @@
         "transient": false,
         "exported": true,
         "token": "MetadataScanner",
-        "initTime": 2.1332500018179417
+        "initTime": 5.003125000745058
       }
     },
     "-503631789": {
@@ -1328,7 +1343,7 @@
         "transient": false,
         "exported": false,
         "token": "ConfigModule",
-        "initTime": 1.8596660010516644
+        "initTime": 4.7417079992592335
       }
     },
     "-408540750": {
@@ -1387,7 +1402,7 @@
         "transient": false,
         "exported": false,
         "token": "TypeOrmModule",
-        "initTime": 1.8517090007662773
+        "initTime": 4.732625000178814
       }
     },
     "-1068388246": {
@@ -1477,7 +1492,7 @@
         "transient": false,
         "exported": false,
         "token": "ConfigModule",
-        "initTime": 1.6802500002086163
+        "initTime": 4.5489999987185
       }
     },
     "-408540749": {
@@ -1524,7 +1539,7 @@
         "transient": false,
         "exported": true,
         "token": "CONFIGURATION(aliyun)",
-        "initTime": 1.6869169995188713
+        "initTime": 4.555833000689745
       }
     },
     "-759474504": {
@@ -1551,7 +1566,7 @@
         "transient": false,
         "exported": false,
         "token": "SmsModule",
-        "initTime": 1.642374999821186
+        "initTime": 4.504958000034094
       }
     },
     "-811664770": {
@@ -1599,7 +1614,7 @@
         "transient": false,
         "exported": false,
         "token": "TypeOrmModule",
-        "initTime": 1.5041669979691505
+        "initTime": 4.366042003035545
       }
     },
     "-223917668": {
@@ -1642,7 +1657,7 @@
         "transient": false,
         "exported": false,
         "token": "UsersModule",
-        "initTime": 1.4964170008897781
+        "initTime": 4.358500000089407
       }
     },
     "-618964563": {
@@ -1690,7 +1705,7 @@
         "transient": false,
         "exported": false,
         "token": "HashingService",
-        "initTime": 1.4916249997913837
+        "initTime": 4.353542000055313
       }
     },
     "-1102473136": {
@@ -1722,7 +1737,7 @@
         "transient": false,
         "exported": false,
         "token": "TypeOrmModule",
-        "initTime": 1.4689580015838146
+        "initTime": 4.327167000621557
       }
     },
     "-223917667": {
@@ -1780,7 +1795,7 @@
         "transient": false,
         "exported": false,
         "token": "AuthModule",
-        "initTime": 1.4592499993741512
+        "initTime": 4.316874999552965
       }
     },
     "-120792986": {
@@ -1844,7 +1859,7 @@
         "transient": false,
         "exported": false,
         "token": "ConfigModule",
-        "initTime": 1.418333001434803
+        "initTime": 4.271291997283697
       }
     },
     "-408540748": {
@@ -1891,7 +1906,7 @@
         "transient": false,
         "exported": true,
         "token": "CONFIGURATION(jwt)",
-        "initTime": 1.4312499985098839
+        "initTime": 4.2827909998595715
       }
     },
     "-536515674": {
@@ -1932,8 +1947,7 @@
         "static": true,
         "transient": false,
         "exported": false,
-        "token": "JWT_MODULE_OPTIONS",
-        "initTime": 2.5627500005066395
+        "token": "JWT_MODULE_OPTIONS"
       }
     },
     "-1327562795": {
@@ -1961,6 +1975,80 @@
         "exported": false,
         "token": "ApplicationConfig"
       }
+    },
+    "-1624918028": {
+      "id": "-1624918028",
+      "label": "579419cfe20f56e38d244",
+      "parent": "-1327562795",
+      "metadata": {
+        "type": "injectable",
+        "internal": false,
+        "sourceModuleName": "FileModule",
+        "durable": false,
+        "static": true,
+        "transient": false,
+        "exported": false,
+        "token": "579419cfe20f56e38d244",
+        "subtype": "interceptor"
+      }
+    },
+    "-1918983541": {
+      "id": "-1918983541",
+      "label": "OpenaiModule",
+      "metadata": {
+        "type": "module",
+        "global": false,
+        "dynamic": false,
+        "internal": false
+      }
+    },
+    "-1281481412": {
+      "id": "-1281481412",
+      "label": "OpenaiModule",
+      "parent": "-1918983541",
+      "metadata": {
+        "type": "provider",
+        "internal": true,
+        "sourceModuleName": "OpenaiModule",
+        "durable": false,
+        "static": true,
+        "scope": 0,
+        "transient": false,
+        "exported": false,
+        "token": "OpenaiModule",
+        "initTime": 4.19870799779892
+      }
+    },
+    "-1941036795": {
+      "id": "-1941036795",
+      "label": "ModuleRef",
+      "parent": "-1918983541",
+      "metadata": {
+        "type": "provider",
+        "internal": true,
+        "sourceModuleName": "OpenaiModule",
+        "durable": false,
+        "static": true,
+        "scope": 0,
+        "transient": false,
+        "exported": false,
+        "token": "ModuleRef"
+      }
+    },
+    "-2033682719": {
+      "id": "-2033682719",
+      "label": "OpenaiService",
+      "parent": "-1918983541",
+      "metadata": {
+        "type": "provider",
+        "internal": false,
+        "sourceModuleName": "OpenaiModule",
+        "durable": false,
+        "static": true,
+        "transient": false,
+        "exported": false,
+        "token": "OpenaiService"
+      }
     }
   },
   "edges": {
@@ -2014,23 +2102,6 @@
       },
       "id": "184060730"
     },
-    "189694679": {
-      "source": "1434456218",
-      "target": "671882984",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "InternalCoreModule",
-        "sourceClassName": "Reflector",
-        "targetClassName": "Reflector",
-        "sourceClassToken": "Reflector",
-        "targetClassToken": "Reflector",
-        "targetModuleName": "InternalCoreModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor",
-        "internal": true
-      },
-      "id": "189694679"
-    },
     "219775656": {
       "source": "-19112576",
       "target": "-1134487807",
@@ -2281,22 +2352,6 @@
       },
       "id": "1010561902"
     },
-    "1028357956": {
-      "source": "373235992",
-      "target": "-438112115",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "DevtoolsModule",
-        "sourceClassName": "SandboxedCodeExecutor",
-        "targetClassName": "MetadataScanner",
-        "sourceClassToken": "SandboxedCodeExecutor",
-        "targetClassToken": "MetadataScanner",
-        "targetModuleName": "DiscoveryModule",
-        "keyOrIndex": 2,
-        "injectionType": "constructor"
-      },
-      "id": "1028357956"
-    },
     "1056428433": {
       "source": "-19112576",
       "target": "555176277",
@@ -2467,6 +2522,16 @@
       },
       "id": "1498156362"
     },
+    "1577638499": {
+      "source": "-1918983541",
+      "target": "1482218087",
+      "metadata": {
+        "type": "module-to-module",
+        "sourceModuleName": "OpenaiModule",
+        "targetModuleName": "ThrottlerModule"
+      },
+      "id": "1577638499"
+    },
     "1635338963": {
       "source": "-2135742481",
       "target": "555176277",
@@ -2607,40 +2672,6 @@
       },
       "id": "2109546608"
     },
-    "-1759625292": {
-      "source": "-553129559",
-      "target": "-326832201",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "InternalCoreModule",
-        "sourceClassName": "HttpAdapterHost",
-        "targetClassName": "HttpAdapterHost",
-        "sourceClassToken": "HttpAdapterHost",
-        "targetClassToken": "HttpAdapterHost",
-        "targetModuleName": "InternalCoreModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor",
-        "internal": true
-      },
-      "id": "-1759625292"
-    },
-    "-221187751": {
-      "source": "400681591",
-      "target": "-26938366",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "DiscoveryModule",
-        "sourceClassName": "DiscoveryService",
-        "targetClassName": "ModulesContainer",
-        "sourceClassToken": "DiscoveryService",
-        "targetClassToken": "ModulesContainer",
-        "targetModuleName": "InternalCoreModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor",
-        "internal": true
-      },
-      "id": "-221187751"
-    },
     "-692061701": {
       "source": "-19112576",
       "target": "-1033621697",
@@ -2701,6 +2732,16 @@
       },
       "id": "-789499568"
     },
+    "-668061890": {
+      "source": "-19112576",
+      "target": "-1918983541",
+      "metadata": {
+        "type": "module-to-module",
+        "sourceModuleName": "AppModule",
+        "targetModuleName": "OpenaiModule"
+      },
+      "id": "-668061890"
+    },
     "-815553404": {
       "source": "-19112576",
       "target": "211572259",
@@ -3141,167 +3182,77 @@
       },
       "id": "-1517205111"
     },
-    "-951393523": {
-      "source": "930519593",
-      "target": "1063746662",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "ConfigHostModule",
-        "sourceClassName": "ConfigService",
-        "targetClassName": "CONFIGURATION_TOKEN",
-        "sourceClassToken": "Symbol(CONFIG_SERVICE)",
-        "targetClassToken": "CONFIGURATION_TOKEN",
-        "targetModuleName": "ConfigHostModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor"
-      },
-      "id": "-951393523"
-    },
-    "-1597821732": {
-      "source": "373235992",
-      "target": "-1904419534",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "DevtoolsModule",
-        "sourceClassName": "SandboxedCodeExecutor",
-        "targetClassName": "SerializedGraph",
-        "sourceClassToken": "SandboxedCodeExecutor",
-        "targetClassToken": "SerializedGraph",
-        "targetModuleName": "InternalCoreModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor",
-        "internal": true
-      },
-      "id": "-1597821732"
-    },
-    "-2041147046": {
-      "source": "373235992",
-      "target": "-26938366",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "DevtoolsModule",
-        "sourceClassName": "SandboxedCodeExecutor",
-        "targetClassName": "ModulesContainer",
-        "sourceClassToken": "SandboxedCodeExecutor",
-        "targetClassToken": "ModulesContainer",
-        "targetModuleName": "InternalCoreModule",
-        "keyOrIndex": 1,
-        "injectionType": "constructor",
-        "internal": true
-      },
-      "id": "-2041147046"
-    },
-    "-1045206778": {
-      "source": "1310883338",
-      "target": "1063746662",
-      "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "ConfigModule",
-        "sourceClassName": "CONFIGURATION_LOADER",
-        "targetClassName": "CONFIGURATION_TOKEN",
-        "sourceClassToken": "CONFIGURATION_LOADER",
-        "targetClassToken": "CONFIGURATION_TOKEN",
-        "targetModuleName": "ConfigHostModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor"
-      },
-      "id": "-1045206778"
-    },
-    "-481663631": {
-      "source": "1310883338",
-      "target": "-1790226991",
+    "-481638553": {
+      "source": "-1918983541",
+      "target": "555176277",
       "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "ConfigModule",
-        "sourceClassName": "CONFIGURATION_LOADER",
-        "targetClassName": "CONFIGURATION(aliyun)",
-        "sourceClassToken": "CONFIGURATION_LOADER",
-        "targetClassToken": "CONFIGURATION(aliyun)",
-        "targetModuleName": "ConfigModule",
-        "keyOrIndex": 1,
-        "injectionType": "constructor"
+        "type": "module-to-module",
+        "sourceModuleName": "OpenaiModule",
+        "targetModuleName": "InternalCoreModule"
       },
-      "id": "-481663631"
+      "id": "-481638553"
     },
-    "-1782972962": {
-      "source": "1333473084",
-      "target": "1063746662",
+    "-1676930569": {
+      "source": "-1918983541",
+      "target": "1712628187",
       "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "ConfigModule",
-        "sourceClassName": "CONFIGURATION_LOADER",
-        "targetClassName": "CONFIGURATION_TOKEN",
-        "sourceClassToken": "CONFIGURATION_LOADER",
-        "targetClassToken": "CONFIGURATION_TOKEN",
-        "targetModuleName": "ConfigHostModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor"
+        "type": "module-to-module",
+        "sourceModuleName": "OpenaiModule",
+        "targetModuleName": "ConfigModule"
       },
-      "id": "-1782972962"
+      "id": "-1676930569"
     },
-    "-1103714220": {
-      "source": "1333473084",
-      "target": "-1453472830",
+    "-474351304": {
+      "source": "-1918983541",
+      "target": "211572259",
       "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "ConfigModule",
-        "sourceClassName": "CONFIGURATION_LOADER",
-        "targetClassName": "CONFIGURATION(jwt)",
-        "sourceClassToken": "CONFIGURATION_LOADER",
-        "targetClassToken": "CONFIGURATION(jwt)",
-        "targetModuleName": "ConfigModule",
-        "keyOrIndex": 1,
-        "injectionType": "constructor"
+        "type": "module-to-module",
+        "sourceModuleName": "OpenaiModule",
+        "targetModuleName": "ConfigHostModule"
       },
-      "id": "-1103714220"
+      "id": "-474351304"
     },
-    "-1276722459": {
-      "source": "1705191969",
-      "target": "-1790226991",
+    "-848542805": {
+      "source": "-1918983541",
+      "target": "-1026304274",
       "metadata": {
-        "type": "class-to-class",
-        "sourceModuleName": "AliyunModule",
-        "sourceClassName": "AliyunService",
-        "targetClassName": "CONFIGURATION(aliyun)",
-        "sourceClassToken": "AliyunService",
-        "targetClassToken": "CONFIGURATION(aliyun)",
-        "targetModuleName": "ConfigModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor"
+        "type": "module-to-module",
+        "sourceModuleName": "OpenaiModule",
+        "targetModuleName": "TypeOrmCoreModule"
       },
-      "id": "-1276722459"
+      "id": "-848542805"
     },
-    "-711278315": {
-      "source": "-1900804336",
-      "target": "-1453472830",
+    "-25889379": {
+      "source": "1609867136",
+      "target": "-1624918028",
       "metadata": {
         "type": "class-to-class",
-        "sourceModuleName": "JwtModule",
-        "sourceClassName": "JWT_MODULE_OPTIONS",
-        "targetClassName": "CONFIGURATION(jwt)",
-        "sourceClassToken": "JWT_MODULE_OPTIONS",
-        "targetClassToken": "CONFIGURATION(jwt)",
-        "targetModuleName": "ConfigModule",
-        "keyOrIndex": 0,
-        "injectionType": "constructor"
+        "sourceModuleName": "FileModule",
+        "sourceClassName": "FileController",
+        "targetClassName": "579419cfe20f56e38d244",
+        "sourceClassToken": "FileController",
+        "targetClassToken": "579419cfe20f56e38d244",
+        "targetModuleName": "FileModule",
+        "injectionType": "decorator"
       },
-      "id": "-711278315"
+      "id": "-25889379"
     },
-    "-1717178441": {
-      "source": "385041166",
-      "target": "-1453472830",
+    "-1759625292": {
+      "source": "-553129559",
+      "target": "-326832201",
       "metadata": {
         "type": "class-to-class",
-        "sourceModuleName": "AuthModule",
-        "sourceClassName": "JwtStrategy",
-        "targetClassName": "CONFIGURATION(jwt)",
-        "sourceClassToken": "JwtStrategy",
-        "targetClassToken": "CONFIGURATION(jwt)",
-        "targetModuleName": "ConfigModule",
+        "sourceModuleName": "InternalCoreModule",
+        "sourceClassName": "HttpAdapterHost",
+        "targetClassName": "HttpAdapterHost",
+        "sourceClassToken": "HttpAdapterHost",
+        "targetClassToken": "HttpAdapterHost",
+        "targetModuleName": "InternalCoreModule",
         "keyOrIndex": 0,
-        "injectionType": "constructor"
+        "injectionType": "constructor",
+        "internal": true
       },
-      "id": "-1717178441"
+      "id": "-1759625292"
     }
   },
   "entrypoints": {},
@@ -3312,16 +3263,8 @@
   "status": "partial",
   "metadata": {
     "cause": {
-      "type": "unknown-dependencies",
-      "context": {
-        "index": 0,
-        "dependencies": [
-          "Object"
-        ],
-        "name": "Object"
-      },
-      "moduleId": "-1327562795",
-      "nodeId": "1837744481"
+      "type": "unknown",
+      "error": {}
     }
   }
 }
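
The updated graph also records an interceptor enhancer on `FileController` (node `-1624918028`, `methodKey: "uploadFile"`, `subtype: "interceptor"`). The controller change itself is not part of this view; as a hypothetical sketch, a method-scoped upload interceptor that would serialize into exactly this kind of metadata looks like the following (the `FileInterceptor('file')` pairing is an assumption, suggested by the `@types/multer` dependency):

import { Controller, Post, UploadedFile, UseInterceptors } from '@nestjs/common'
import { FileInterceptor } from '@nestjs/platform-express'

@Controller('file')
export class FileController {
    // A method-scoped interceptor is what devtools serializes into graph.json
    // as an "enhancers" entry with methodKey "uploadFile" and subtype "interceptor".
    @Post('upload')
    @UseInterceptors(FileInterceptor('file'))
    uploadFile(@UploadedFile() file: Express.Multer.File) {
        return { filename: file.originalname, size: file.size }
    }
}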

+ 6 - 0
package.json

@@ -22,6 +22,7 @@
   },
   "dependencies": {
     "@alicloud/dysmsapi20170525": "2.0.23",
+    "@dqbd/tiktoken": "^1.0.6",
     "@nestjs/common": "^9.3.3",
     "@nestjs/config": "^2.3.1",
     "@nestjs/core": "^9.3.3",
@@ -35,18 +36,23 @@
     "@nestjs/typeorm": "^9.0.1",
     "@types/multer": "^1.4.7",
     "ali-oss": "^6.17.1",
+    "axios": "^1.3.5",
     "bcrypt": "^5.1.0",
     "class-transformer": "^0.5.1",
     "class-validator": "^0.13.0",
     "date-fns": "^2.29.3",
+    "eventsource-parser": "^1.0.0",
     "express-basic-auth": "^1.2.1",
     "express-handlebars": "^7.0.6",
     "handlebars": "^4.7.7",
+    "keyv": "^4.5.2",
     "mysql2": "^3.1.2",
     "nodemailer": "^6.9.1",
+    "p-timeout": "^6.1.1",
     "passport": "^0.6.0",
     "passport-http-bearer": "^1.0.1",
     "passport-jwt": "^4.0.1",
+    "quick-lru": "^6.1.1",
     "randomstring": "^1.2.3",
     "reflect-metadata": "^0.1.13",
     "rimraf": "^4.1.2",

+ 3 - 1
src/app.module.ts

@@ -11,6 +11,7 @@ import { SmsModule } from './sms/sms.module'
 import { DevtoolsModule } from '@nestjs/devtools-integration'
 import { AuthModule } from './auth/auth.module';
 import { FileModule } from './file/file.module';
+import { OpenaiModule } from './openai/openai.module';
 
 @Module({
     imports: [
@@ -61,7 +62,8 @@ import { FileModule } from './file/file.module';
         SmsModule,
         UsersModule,
         AuthModule,
-        FileModule
+        FileModule,
+        OpenaiModule
     ],
     controllers: [AppController],
     providers: [AppService]
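
The `src/openai/openai.module.ts` file imported here is not included in this commit view. Based on the graph nodes above (an `OpenaiModule` whose only user-defined provider is `OpenaiService`, node `-2033682719`), a minimal hypothetical sketch of its contents:

import { Module } from '@nestjs/common'
// Hypothetical: the real openai.module.ts is not shown in this diff.
import { OpenaiService } from './openai.service'

@Module({
    providers: [OpenaiService]
})
export class OpenaiModule {}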

+ 437 - 0
src/chatapi/chatgpt-api.ts

@@ -0,0 +1,437 @@
+import * as Keyv from 'keyv'
+import QuickLRU from 'quick-lru'
+import { v4 as uuidv4 } from 'uuid'
+import * as tokenizer from './tokenizer'
+import * as types from './types'
+import { fetch as globalFetch } from './fetch'
+import { fetchSSE } from './fetch-sse'
+
+const CHATGPT_MODEL = 'gpt-3.5-turbo'
+
+const USER_LABEL_DEFAULT = 'User'
+const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'
+
+export class ChatGPTAPI {
+    protected _apiKey: string
+    protected _apiBaseUrl: string
+    protected _apiOrg?: string
+    protected _debug: boolean
+
+    protected _systemMessage: string
+    protected _completionParams: Omit<types.openai.CreateChatCompletionRequest, 'messages' | 'n'>
+    protected _maxModelTokens: number
+    protected _maxResponseTokens: number
+    protected _fetch: types.FetchFn
+
+    protected _getMessageById: types.GetMessageByIdFunction
+    protected _upsertMessage: types.UpsertMessageFunction
+
+    protected _messageStore: Keyv<types.ChatMessage>
+
+    /**
+     * Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
+     *
+     * @param apiKey - OpenAI API key (required).
+     * @param apiOrg - Optional OpenAI API organization ID.
+     * @param apiBaseUrl - Optional override for the OpenAI API base URL.
+     * @param debug - Optional; enables logging debugging info to stdout.
+     * @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
+     * @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
+     * @param maxResponseTokens - Optional override for the maximum number of tokens reserved for the model's response. Defaults to 1000.
+     * @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
+     * @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
+     * @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
+     * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
+     */
+    constructor(opts: types.ChatGPTAPIOptions) {
+        const {
+            apiKey,
+            apiOrg,
+            apiBaseUrl = 'https://api.openai.com/v1',
+            debug = false,
+            messageStore,
+            completionParams,
+            systemMessage,
+            maxModelTokens = 4000,
+            maxResponseTokens = 1000,
+            getMessageById,
+            upsertMessage,
+            fetch = globalFetch
+        } = opts
+
+        this._apiKey = apiKey
+        this._apiOrg = apiOrg
+        this._apiBaseUrl = apiBaseUrl
+        this._debug = !!debug
+        this._fetch = fetch
+
+        this._completionParams = {
+            model: CHATGPT_MODEL,
+            temperature: 0.8,
+            top_p: 1.0,
+            presence_penalty: 1.0,
+            ...completionParams
+        }
+
+        this._systemMessage = systemMessage
+
+        if (this._systemMessage === undefined) {
+            const currentDate = new Date().toISOString().split('T')[0]
+            this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
+        }
+
+        this._maxModelTokens = maxModelTokens
+        this._maxResponseTokens = maxResponseTokens
+
+        this._getMessageById = getMessageById ?? this._defaultGetMessageById
+        this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
+
+        if (messageStore) {
+            this._messageStore = messageStore
+        } else {
+            this._messageStore = new Keyv<types.ChatMessage, any>({
+                store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
+            })
+        }
+
+        if (!this._apiKey) {
+            throw new Error('OpenAI missing required apiKey')
+        }
+
+        if (!this._fetch) {
+            throw new Error('Invalid environment; fetch is not defined')
+        }
+
+        if (typeof this._fetch !== 'function') {
+            throw new Error('Invalid "fetch": not a function')
+        }
+    }
+
+    /**
+     * Sends a message to the OpenAI chat completions endpoint, waits for the response
+     * to resolve, and returns the response.
+     *
+     * If you want your response to have historical context, you must provide a valid `parentMessageId`.
+     *
+     * If you want to receive a stream of partial responses, use `opts.onProgress`.
+     *
+     * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
+     *
+     * @param message - The prompt message to send
+     * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
+     * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
+     * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
+     * @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
+     * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
+     * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
+     * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
+     * @param opts.completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
+     *
+     * @returns The response from ChatGPT
+     */
+    async sendMessage(text: string, opts: types.SendMessageOptions = {}): Promise<types.ChatMessage> {
+        const {
+            parentMessageId,
+            messageId = uuidv4(),
+            timeoutMs,
+            onProgress,
+            stream = !!onProgress,
+            completionParams,
+            conversationId
+        } = opts
+
+        let { abortSignal } = opts
+
+        let abortController: AbortController | null = null
+        if (timeoutMs && !abortSignal) {
+            abortController = new AbortController()
+            abortSignal = abortController.signal
+        }
+
+        const message: types.ChatMessage = {
+            role: 'user',
+            id: messageId,
+            conversationId,
+            parentMessageId,
+            text
+        }
+
+        const latestQuestion = message
+
+        const { messages, maxTokens, numTokens } = await this._buildMessages(text, opts)
+
+        const result: types.ChatMessage = {
+            role: 'assistant',
+            id: uuidv4(),
+            conversationId,
+            parentMessageId: messageId,
+            text: ''
+        }
+
+        const responseP = new Promise<types.ChatMessage>(async (resolve, reject) => {
+            const url = `${this._apiBaseUrl}/chat/completions`
+            const headers = {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${this._apiKey}`
+            }
+            const body = {
+                max_tokens: maxTokens,
+                ...this._completionParams,
+                ...completionParams,
+                messages,
+                stream
+            }
+
+            // Support multiple organizations
+            // See https://platform.openai.com/docs/api-reference/authentication
+            if (this._apiOrg) {
+                headers['OpenAI-Organization'] = this._apiOrg
+            }
+
+            if (this._debug) {
+                console.log(`sendMessage (${numTokens} tokens)`, body)
+            }
+
+            if (stream) {
+                fetchSSE(
+                    url,
+                    {
+                        method: 'POST',
+                        headers,
+                        body: JSON.stringify(body),
+                        signal: abortSignal,
+                        onMessage: (data: string) => {
+                            if (data === '[DONE]') {
+                                result.text = result.text.trim()
+                                return resolve(result)
+                            }
+
+                            try {
+                                const response: types.openai.CreateChatCompletionDeltaResponse = JSON.parse(data)
+
+                                if (response.id) {
+                                    result.id = response.id
+                                }
+
+                                if (response.choices?.length) {
+                                    const delta = response.choices[0].delta
+                                    result.delta = delta.content
+                                    if (delta?.content) result.text += delta.content
+
+                                    if (delta.role) {
+                                        result.role = delta.role
+                                    }
+
+                                    result.detail = response
+                                    onProgress?.(result)
+                                }
+                            } catch (err) {
+                                console.warn('OpenAI stream SSE event unexpected error', err)
+                                return reject(err)
+                            }
+                        }
+                    },
+                    this._fetch
+                ).catch(reject)
+            } else {
+                try {
+                    const res = await this._fetch(url, {
+                        method: 'POST',
+                        headers,
+                        body: JSON.stringify(body),
+                        signal: abortSignal
+                    })
+
+                    if (!res.ok) {
+                        const reason = await res.text()
+                        const msg = `OpenAI error ${res.status || res.statusText}: ${reason}`
+                        const error = new types.ChatGPTError(msg, { cause: res })
+                        error.statusCode = res.status
+                        error.statusText = res.statusText
+                        return reject(error)
+                    }
+
+                    const response: types.openai.CreateChatCompletionResponse = await res.json()
+                    if (this._debug) {
+                        console.log(response)
+                    }
+
+                    if (response?.id) {
+                        result.id = response.id
+                    }
+
+                    if (response?.choices?.length) {
+                        const message = response.choices[0].message
+                        result.text = message.content
+                        if (message.role) {
+                            result.role = message.role
+                        }
+                    } else {
+                        const res = response as any
+                        return reject(new Error(`OpenAI error: ${res?.detail?.message || res?.detail || 'unknown'}`))
+                    }
+
+                    result.detail = response
+
+                    return resolve(result)
+                } catch (err) {
+                    return reject(err)
+                }
+            }
+        }).then(async (message) => {
+            if (message.detail && !message.detail.usage) {
+                try {
+                    const promptTokens = numTokens
+                    const completionTokens = await this._getTokenCount(message.text)
+                    message.detail.usage = {
+                        prompt_tokens: promptTokens,
+                        completion_tokens: completionTokens,
+                        total_tokens: promptTokens + completionTokens,
+                        estimated: true
+                    }
+                } catch (err) {
+                    // TODO: this should really never happen, but if it does,
+                    // we should notify the user gracefully
+                }
+            }
+
+            return Promise.all([this._upsertMessage(latestQuestion), this._upsertMessage(message)]).then(() => message)
+        })
+
+        if (timeoutMs) {
+            if (abortController) {
+                // This will be called when a timeout occurs in order for us to forcibly
+                // ensure that the underlying HTTP request is aborted.
+                ;(responseP as any).cancel = () => {
+                    abortController.abort()
+                }
+            }
+            const pTimeout = (await import('p-timeout')).default
+            return pTimeout(responseP, {
+                milliseconds: timeoutMs,
+                message: 'OpenAI timed out waiting for response'
+            })
+        } else {
+            return responseP
+        }
+    }
+
+    get apiKey(): string {
+        return this._apiKey
+    }
+
+    set apiKey(apiKey: string) {
+        this._apiKey = apiKey
+    }
+
+    get apiOrg(): string {
+        return this._apiOrg
+    }
+
+    set apiOrg(apiOrg: string) {
+        this._apiOrg = apiOrg
+    }
+
+    protected async _buildMessages(text: string, opts: types.SendMessageOptions) {
+        const { systemMessage = this._systemMessage } = opts
+        let { parentMessageId } = opts
+
+        const userLabel = USER_LABEL_DEFAULT
+        const assistantLabel = ASSISTANT_LABEL_DEFAULT
+
+        const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
+        let messages: types.openai.ChatCompletionRequestMessage[] = []
+
+        if (systemMessage) {
+            messages.push({
+                role: 'system',
+                content: systemMessage
+            })
+        }
+
+        const systemMessageOffset = messages.length
+        let nextMessages = text
+            ? messages.concat([
+                  {
+                      role: 'user',
+                      content: text,
+                      name: opts.name
+                  }
+              ])
+            : messages
+        let numTokens = 0
+
+        do {
+            const prompt = nextMessages
+                .reduce((prompt, message) => {
+                    switch (message.role) {
+                        case 'system':
+                            return prompt.concat([`Instructions:\n${message.content}`])
+                        case 'user':
+                            return prompt.concat([`${userLabel}:\n${message.content}`])
+                        default:
+                            return prompt.concat([`${assistantLabel}:\n${message.content}`])
+                    }
+                }, [] as string[])
+                .join('\n\n')
+
+            const nextNumTokensEstimate = await this._getTokenCount(prompt)
+            const isValidPrompt = nextNumTokensEstimate <= maxNumTokens
+
+            if (prompt && !isValidPrompt) {
+                break
+            }
+
+            messages = nextMessages
+            numTokens = nextNumTokensEstimate
+
+            if (!isValidPrompt) {
+                break
+            }
+
+            if (!parentMessageId) {
+                break
+            }
+
+            const parentMessage = await this._getMessageById(parentMessageId)
+            if (!parentMessage) {
+                break
+            }
+
+            const parentMessageRole = parentMessage.role || 'user'
+
+            nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
+                {
+                    role: parentMessageRole,
+                    content: parentMessage.text,
+                    name: parentMessage.name
+                },
+                ...nextMessages.slice(systemMessageOffset)
+            ])
+
+            parentMessageId = parentMessage.parentMessageId
+        } while (true)
+
+        // Use up to `maxModelTokens` tokens (prompt + response), but try to leave
+        // `maxResponseTokens` tokens for the response.
+        const maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens))
+
+        return { messages, maxTokens, numTokens }
+    }
+
+    protected async _getTokenCount(text: string) {
+        // TODO: use a better fix in the tokenizer
+        text = text.replace(/<\|endoftext\|>/g, '')
+
+        return tokenizer.encode(text).length
+    }
+
+    protected async _defaultGetMessageById(id: string): Promise<types.ChatMessage> {
+        const res = await this._messageStore.get(id)
+        return res
+    }
+
+    protected async _defaultUpsertMessage(message: types.ChatMessage): Promise<void> {
+        await this._messageStore.set(message.id, message)
+    }
+}
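
A usage sketch for the `ChatGPTAPI` class above (the environment-variable name is a placeholder): a plain request, then a streamed follow-up threaded with `parentMessageId`.

import { ChatGPTAPI } from './chatapi'

async function demo() {
    // The constructor throws if apiKey is empty.
    const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY ?? '' })

    // Plain request/response.
    const first = await api.sendMessage('Hello!')
    console.log(first.text)

    // Passing onProgress switches sendMessage to SSE streaming, and
    // parentMessageId threads the previous exchange into the prompt.
    const second = await api.sendMessage('Can you expand on that?', {
        parentMessageId: first.id,
        onProgress: (partial) => process.stdout.write(partial.delta ?? '')
    })
    console.log('\n' + second.text)
}

demo().catch(console.error)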

+ 265 - 0
src/chatapi/chatgpt-unofficial-proxy-api.ts

@@ -0,0 +1,265 @@
+import pTimeout from 'p-timeout'
+import { v4 as uuidv4 } from 'uuid'
+
+import * as types from './types'
+import { fetch as globalFetch } from './fetch'
+import { fetchSSE } from './fetch-sse'
+import { isValidUUIDv4 } from './utils'
+
+export class ChatGPTUnofficialProxyAPI {
+  protected _accessToken: string
+  protected _apiReverseProxyUrl: string
+  protected _debug: boolean
+  protected _model: string
+  protected _headers: Record<string, string>
+  protected _fetch: types.FetchFn
+
+  /**
+   * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
+   */
+  constructor(opts: {
+    accessToken: string
+
+    /** @defaultValue `https://bypass.duti.tech/api/conversation` **/
+    apiReverseProxyUrl?: string
+
+    /** @defaultValue `text-davinci-002-render-sha` **/
+    model?: string
+
+    /** @defaultValue `false` **/
+    debug?: boolean
+
+    /** @defaultValue `undefined` **/
+    headers?: Record<string, string>
+
+    fetch?: types.FetchFn
+  }) {
+    const {
+      accessToken,
+      apiReverseProxyUrl = 'https://bypass.duti.tech/api/conversation',
+      model = 'text-davinci-002-render-sha',
+      debug = false,
+      headers,
+      fetch = globalFetch
+    } = opts
+
+    this._accessToken = accessToken
+    this._apiReverseProxyUrl = apiReverseProxyUrl
+    this._debug = !!debug
+    this._model = model
+    this._fetch = fetch
+    this._headers = headers
+
+    if (!this._accessToken) {
+      throw new Error('ChatGPT invalid accessToken')
+    }
+
+    if (!this._fetch) {
+      throw new Error('Invalid environment; fetch is not defined')
+    }
+
+    if (typeof this._fetch !== 'function') {
+      throw new Error('Invalid "fetch" is not a function')
+    }
+      throw new Error('Invalid "fetch": not a function')
+
+  get accessToken(): string {
+    return this._accessToken
+  }
+
+  set accessToken(value: string) {
+    this._accessToken = value
+  }
+
+  /**
+   * Sends a message to ChatGPT, waits for the response to resolve, and returns
+   * the response.
+   *
+   * If you want your response to have historical context, you must provide a valid `parentMessageId`.
+   *
+   * If you want to receive a stream of partial responses, use `opts.onProgress`.
+   * The resolved `ChatMessage` carries the message and conversation IDs needed
+   * to continue the thread on the next call.
+   *
+   * Set `debug: true` in the `ChatGPTUnofficialProxyAPI` constructor to log the
+   * request URL, headers, and body before each call.
+   *
+   * @param message - The prompt message to send
+   * @param opts.conversationId - Optional ID of a conversation to continue (defaults to `undefined`, which starts a new conversation)
+   * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to a random UUID)
+   * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
+   * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
+   * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
+   * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
+   *
+   * @returns The response from ChatGPT
+   */
+  async sendMessage(
+    text: string,
+    opts: types.SendMessageBrowserOptions = {}
+  ): Promise<types.ChatMessage> {
+    if (!!opts.conversationId !== !!opts.parentMessageId) {
+      throw new Error(
+        'ChatGPTUnofficialProxyAPI.sendMessage: conversationId and parentMessageId must both be set or both be undefined'
+      )
+    }
+
+    if (opts.conversationId && !isValidUUIDv4(opts.conversationId)) {
+      throw new Error(
+        'ChatGPTUnofficialProxyAPI.sendMessage: conversationId is not a valid v4 UUID'
+      )
+    }
+
+    if (opts.parentMessageId && !isValidUUIDv4(opts.parentMessageId)) {
+      throw new Error(
+        'ChatGPTUnofficialProxyAPI.sendMessage: parentMessageId is not a valid v4 UUID'
+      )
+    }
+
+    if (opts.messageId && !isValidUUIDv4(opts.messageId)) {
+      throw new Error(
+        'ChatGPTUnofficialProxyAPI.sendMessage: messageId is not a valid v4 UUID'
+      )
+    }
+
+    const {
+      conversationId,
+      parentMessageId = uuidv4(),
+      messageId = uuidv4(),
+      action = 'next',
+      timeoutMs,
+      onProgress
+    } = opts
+
+    let { abortSignal } = opts
+
+    let abortController: AbortController | null = null
+    if (timeoutMs && !abortSignal) {
+      abortController = new AbortController()
+      abortSignal = abortController.signal
+    }
+
+    const body: types.ConversationJSONBody = {
+      action,
+      messages: [
+        {
+          id: messageId,
+          role: 'user',
+          content: {
+            content_type: 'text',
+            parts: [text]
+          }
+        }
+      ],
+      model: this._model,
+      parent_message_id: parentMessageId
+    }
+
+    if (conversationId) {
+      body.conversation_id = conversationId
+    }
+
+    const result: types.ChatMessage = {
+      role: 'assistant',
+      id: uuidv4(),
+      parentMessageId: messageId,
+      conversationId,
+      text: ''
+    }
+
+    const responseP = new Promise<types.ChatMessage>((resolve, reject) => {
+      const url = this._apiReverseProxyUrl
+      const headers = {
+        ...this._headers,
+        Authorization: `Bearer ${this._accessToken}`,
+        Accept: 'text/event-stream',
+        'Content-Type': 'application/json'
+      }
+
+      if (this._debug) {
+        console.log('POST', url, { body, headers })
+      }
+
+      fetchSSE(
+        url,
+        {
+          method: 'POST',
+          headers,
+          body: JSON.stringify(body),
+          signal: abortSignal,
+          onMessage: (data: string) => {
+            if (data === '[DONE]') {
+              return resolve(result)
+            }
+
+            try {
+              const convoResponseEvent: types.ConversationResponseEvent =
+                JSON.parse(data)
+              if (convoResponseEvent.conversation_id) {
+                result.conversationId = convoResponseEvent.conversation_id
+              }
+
+              if (convoResponseEvent.message?.id) {
+                result.id = convoResponseEvent.message.id
+              }
+
+              const message = convoResponseEvent.message
+              // console.log('event', JSON.stringify(convoResponseEvent, null, 2))
+
+              if (message) {
+                const text = message?.content?.parts?.[0]
+
+                if (text) {
+                  result.text = text
+
+                  if (onProgress) {
+                    onProgress(result)
+                  }
+                }
+              }
+            } catch (err) {
+              reject(err)
+            }
+          },
+          onError: (err) => {
+            reject(err)
+          }
+        },
+        this._fetch
+      ).catch((err) => {
+        const errMessageL = err.toString().toLowerCase()
+
+        if (
+          result.text &&
+          (errMessageL === 'error: typeerror: terminated' ||
+            errMessageL === 'typeerror: terminated')
+        ) {
+          // OpenAI sometimes forcefully terminates the socket from their end before
+          // the HTTP request has resolved cleanly. In my testing, these cases tend to
+          // happen when OpenAI has already sent the last `response`, so we can ignore
+          // the `fetch` error in this case.
+          return resolve(result)
+        } else {
+          return reject(err)
+        }
+      })
+    })
+
+    if (timeoutMs) {
+      if (abortController) {
+        // This will be called when a timeout occurs in order for us to forcibly
+        // ensure that the underlying HTTP request is aborted.
+        ;(responseP as any).cancel = () => {
+          abortController.abort()
+        }
+      }
+
+      return pTimeout(responseP, {
+        milliseconds: timeoutMs,
+        message: 'ChatGPT timed out waiting for response'
+      })
+    } else {
+      return responseP
+    }
+  }
+}
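
A usage sketch for `ChatGPTUnofficialProxyAPI`: unlike `ChatGPTAPI`, it authenticates with a chat.openai.com session access token rather than an API key, and `conversationId`/`parentMessageId` must be passed together to continue a thread (enforced by the validation at the top of `sendMessage`).

import { ChatGPTUnofficialProxyAPI } from './chatapi'

async function demo() {
    // Placeholder; the constructor throws if accessToken is empty.
    const api = new ChatGPTUnofficialProxyAPI({
        accessToken: process.env.OPENAI_ACCESS_TOKEN ?? ''
    })

    const first = await api.sendMessage('Hello!')

    // Both IDs together, or neither: sendMessage rejects one without the other.
    const second = await api.sendMessage('Tell me more.', {
        conversationId: first.conversationId,
        parentMessageId: first.id,
        timeoutMs: 2 * 60 * 1000
    })
    console.log(second.text)
}

demo().catch(console.error)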

+ 89 - 0
src/chatapi/fetch-sse.ts

@@ -0,0 +1,89 @@
+import { createParser } from 'eventsource-parser'
+
+import * as types from './types'
+import { fetch as globalFetch } from './fetch'
+import { streamAsyncIterable } from './stream-async-iterable'
+
+export async function fetchSSE(
+  url: string,
+  options: Parameters<typeof fetch>[1] & {
+    onMessage: (data: string) => void
+    onError?: (error: any) => void
+  },
+  fetch: types.FetchFn = globalFetch
+) {
+  const { onMessage, onError, ...fetchOptions } = options
+  const res = await fetch(url, fetchOptions)
+  if (!res.ok) {
+    let reason: string
+
+    try {
+      reason = await res.text()
+    } catch (err) {
+      reason = res.statusText
+    }
+
+    const msg = `ChatGPT error ${res.status}: ${reason}`
+    const error = new types.ChatGPTError(msg, { cause: res })
+    error.statusCode = res.status
+    error.statusText = res.statusText
+    throw error
+  }
+
+  const parser = createParser((event) => {
+    if (event.type === 'event') {
+      onMessage(event.data)
+    }
+  })
+
+  // handle special response errors
+  const feed = (chunk: string) => {
+    let response = null
+
+    try {
+      response = JSON.parse(chunk)
+    } catch {
+      // ignore
+    }
+
+    if (response?.detail?.type === 'invalid_request_error') {
+      const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`
+      const error = new types.ChatGPTError(msg, { cause: response })
+      error.statusCode = response.detail.code
+      error.statusText = response.detail.message
+
+      if (onError) {
+        onError(error)
+      } else {
+        console.error(error)
+      }
+
+      // don't feed to the event parser
+      return
+    }
+
+    parser.feed(chunk)
+  }
+
+  if (!res.body.getReader) {
+    // Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to
+    // web standards, so this is a workaround...
+    const body: NodeJS.ReadableStream = res.body as any
+
+    if (!body.on || !body.read) {
+      throw new types.ChatGPTError('unsupported "fetch" implementation')
+    }
+
+    body.on('readable', () => {
+      let chunk: string | Buffer
+      while (null !== (chunk = body.read())) {
+        feed(chunk.toString())
+      }
+    })
+  } else {
+    for await (const chunk of streamAsyncIterable(res.body)) {
+      const str = new TextDecoder().decode(chunk)
+      feed(str)
+    }
+  }
+}
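
`fetchSSE` can also be used on its own. A sketch against a placeholder endpoint, showing how `onMessage` receives each SSE `data:` payload and how the `[DONE]` sentinel is conventionally handled by the callers above:

import { fetchSSE } from './fetch-sse'

async function demo() {
    // Placeholder URL and body; any SSE-speaking endpoint works.
    await fetchSSE('https://example.com/v1/stream', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ subscribe: true }),
        onMessage: (data) => {
            if (data === '[DONE]') return // sentinel used by the OpenAI endpoints
            console.log('event payload:', data)
        },
        onError: (err) => console.error('stream error:', err)
    })
}

demo().catch(console.error)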

+ 5 - 0
src/chatapi/fetch.ts

@@ -0,0 +1,5 @@
+/// <reference lib="dom" />
+
+const fetch = globalThis.fetch
+
+export { fetch }

+ 3 - 0
src/chatapi/index.ts

@@ -0,0 +1,3 @@
+export * from './chatgpt-api'
+export * from './chatgpt-unofficial-proxy-api'
+export * from './types'

+ 14 - 0
src/chatapi/stream-async-iterable.ts

@@ -0,0 +1,14 @@
+export async function* streamAsyncIterable<T>(stream: ReadableStream<T>) {
+  const reader = stream.getReader()
+  try {
+    while (true) {
+      const { done, value } = await reader.read()
+      if (done) {
+        return
+      }
+      yield value
+    }
+  } finally {
+    reader.releaseLock()
+  }
+}
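
The helper above adapts a WHATWG `ReadableStream` into an async iterator. A sketch of consuming a `fetch` response body with it, mirroring how fetch-sse.ts uses it on standards-compliant `fetch` implementations:

import { streamAsyncIterable } from './stream-async-iterable'

async function readBody(url: string) {
    const res = await fetch(url)
    if (!res.body) return
    const decoder = new TextDecoder()
    // Each chunk is a Uint8Array; decode incrementally with stream: true.
    for await (const chunk of streamAsyncIterable(res.body)) {
        console.log(decoder.decode(chunk, { stream: true }))
    }
}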

+ 8 - 0
src/chatapi/tokenizer.ts

@@ -0,0 +1,8 @@
+import { get_encoding } from '@dqbd/tiktoken'
+
+// TODO: make this configurable
+const tokenizer = get_encoding('cl100k_base')
+
+export function encode(input: string): Uint32Array {
+  return tokenizer.encode(input)
+}
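
A quick sketch of using the tokenizer module: counting tokens is just the length of the encoded array, which is exactly what `_getTokenCount` in chatgpt-api.ts relies on.

import { encode } from './tokenizer'

const text = 'You are ChatGPT, a large language model trained by OpenAI.'
console.log(`${encode(text).length} tokens`)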

+ 443 - 0
src/chatapi/types.ts

@@ -0,0 +1,443 @@
+import * as Keyv from 'keyv'
+
+export type Role = 'user' | 'assistant' | 'system'
+
+export type FetchFn = typeof fetch
+
+export type ChatGPTAPIOptions = {
+    apiKey: string
+
+    /** @defaultValue `'https://api.openai.com/v1'` **/
+    apiBaseUrl?: string
+
+    apiOrg?: string
+
+    /** @defaultValue `false` **/
+    debug?: boolean
+
+    completionParams?: Partial<Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>>
+
+    systemMessage?: string
+
+    /** @defaultValue `4000` **/
+    maxModelTokens?: number
+
+    /** @defaultValue `1000` **/
+    maxResponseTokens?: number
+
+    messageStore?: Keyv
+    getMessageById?: GetMessageByIdFunction
+    upsertMessage?: UpsertMessageFunction
+
+    fetch?: FetchFn
+}
+
+export type SendMessageOptions = {
+    /** The name of a user in a multi-user chat. */
+    name?: string
+    parentMessageId?: string
+    conversationId?: string
+    messageId?: string
+    stream?: boolean
+    systemMessage?: string
+    timeoutMs?: number
+    onProgress?: (partialResponse: ChatMessage) => void
+    abortSignal?: AbortSignal
+    completionParams?: Partial<Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>>
+}
+
+export type MessageActionType = 'next' | 'variant'
+
+export type SendMessageBrowserOptions = {
+    conversationId?: string
+    parentMessageId?: string
+    messageId?: string
+    action?: MessageActionType
+    timeoutMs?: number
+    onProgress?: (partialResponse: ChatMessage) => void
+    abortSignal?: AbortSignal
+}
+
+export interface ChatMessage {
+    id: string
+    text: string
+    role: Role
+    name?: string
+    delta?: string
+    detail?: openai.CreateChatCompletionResponse | CreateChatCompletionStreamResponse
+
+    // relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI
+    parentMessageId?: string
+
+    // only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
+    conversationId?: string
+}
+
+export class ChatGPTError extends Error {
+    statusCode?: number
+    statusText?: string
+    isFinal?: boolean
+    accountId?: string
+    cause?: any
+    constructor(message: string, options?: any) {
+        super(message)
+        this.cause = options?.cause
+    }
+}
+
+/** Returns a chat message from a store by it's ID (or null if not found). */
+export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>
+
+/** Upserts a chat message to a store. */
+export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
+
+export interface CreateChatCompletionStreamResponse extends openai.CreateChatCompletionDeltaResponse {
+    usage: CreateCompletionStreamResponseUsage
+}
+
+export interface CreateCompletionStreamResponseUsage extends openai.CreateCompletionResponseUsage {
+    estimated: true
+}
+
+/**
+ * https://chat.openai.com/backend-api/conversation
+ */
+export type ConversationJSONBody = {
+    /**
+     * The action to take
+     */
+    action: string
+
+    /**
+     * The ID of the conversation
+     */
+    conversation_id?: string
+
+    /**
+     * Prompts to provide
+     */
+    messages: Prompt[]
+
+    /**
+     * The model to use
+     */
+    model: string
+
+    /**
+     * The parent message ID
+     */
+    parent_message_id: string
+}
+
+export type Prompt = {
+    /**
+     * The content of the prompt
+     */
+    content: PromptContent
+
+    /**
+     * The ID of the prompt
+     */
+    id: string
+
+    /**
+     * The role played in the prompt
+     */
+    role: Role
+}
+
+export type ContentType = 'text'
+
+export type PromptContent = {
+    /**
+     * The content type of the prompt
+     */
+    content_type: ContentType
+
+    /**
+     * The parts to the prompt
+     */
+    parts: string[]
+}
+
+export type ConversationResponseEvent = {
+    message?: Message
+    conversation_id?: string
+    error?: string | null
+}
+
+export type Message = {
+    id: string
+    content: MessageContent
+    role: Role
+    user: string | null
+    create_time: string | null
+    update_time: string | null
+    end_turn: null
+    weight: number
+    recipient: string
+    metadata: MessageMetadata
+}
+
+export type MessageContent = {
+    content_type: string
+    parts: string[]
+}
+
+export type MessageMetadata = any
+
+export namespace openai {
+    export interface CreateChatCompletionDeltaResponse {
+        id: string
+        object: 'chat.completion.chunk'
+        created: number
+        model: string
+        choices: [
+            {
+                delta: {
+                    role: Role
+                    content?: string
+                }
+                index: number
+                finish_reason: string | null
+            }
+        ]
+    }
+
+    /**
+     *
+     * @export
+     * @interface ChatCompletionRequestMessage
+     */
+    export interface ChatCompletionRequestMessage {
+        /**
+         * The role of the author of this message.
+         * @type {string}
+         * @memberof ChatCompletionRequestMessage
+         */
+        role: ChatCompletionRequestMessageRoleEnum
+        /**
+         * The contents of the message
+         * @type {string}
+         * @memberof ChatCompletionRequestMessage
+         */
+        content: string
+        /**
+         * The name of the user in a multi-user chat
+         * @type {string}
+         * @memberof ChatCompletionRequestMessage
+         */
+        name?: string
+    }
+    export declare const ChatCompletionRequestMessageRoleEnum: {
+        readonly System: 'system'
+        readonly User: 'user'
+        readonly Assistant: 'assistant'
+    }
+    export declare type ChatCompletionRequestMessageRoleEnum =
+        (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
+    /**
+     *
+     * @export
+     * @interface ChatCompletionResponseMessage
+     */
+    export interface ChatCompletionResponseMessage {
+        /**
+         * The role of the author of this message.
+         * @type {string}
+         * @memberof ChatCompletionResponseMessage
+         */
+        role: ChatCompletionResponseMessageRoleEnum
+        /**
+         * The contents of the message
+         * @type {string}
+         * @memberof ChatCompletionResponseMessage
+         */
+        content: string
+    }
+    export declare const ChatCompletionResponseMessageRoleEnum: {
+        readonly System: 'system'
+        readonly User: 'user'
+        readonly Assistant: 'assistant'
+    }
+    export declare type ChatCompletionResponseMessageRoleEnum =
+        (typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
+    /**
+     *
+     * @export
+     * @interface CreateChatCompletionRequest
+     */
+    export interface CreateChatCompletionRequest {
+        /**
+         * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
+         * @type {string}
+         * @memberof CreateChatCompletionRequest
+         */
+        model: string
+        /**
+         * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
+         * @type {Array<ChatCompletionRequestMessage>}
+         * @memberof CreateChatCompletionRequest
+         */
+        messages: Array<ChatCompletionRequestMessage>
+        /**
+         * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.  We generally recommend altering this or `top_p` but not both.
+         * @type {number}
+         * @memberof CreateChatCompletionRequest
+         */
+        temperature?: number | null
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.  We generally recommend altering this or `temperature` but not both.
+         * @type {number}
+         * @memberof CreateChatCompletionRequest
+         */
+        top_p?: number | null
+        /**
+         * How many chat completion choices to generate for each input message.
+         * @type {number}
+         * @memberof CreateChatCompletionRequest
+         */
+        n?: number | null
+        /**
+         * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
+         * @type {boolean}
+         * @memberof CreateChatCompletionRequest
+         */
+        stream?: boolean | null
+        /**
+         *
+         * @type {CreateChatCompletionRequestStop}
+         * @memberof CreateChatCompletionRequest
+         */
+        stop?: CreateChatCompletionRequestStop
+        /**
+         * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
+         * @type {number}
+         * @memberof CreateChatCompletionRequest
+         */
+        max_tokens?: number
+        /**
+         * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.  [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+         * @type {number}
+         * @memberof CreateChatCompletionRequest
+         */
+        presence_penalty?: number | null
+        /**
+         * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.  [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+         * @type {number}
+         * @memberof CreateChatCompletionRequest
+         */
+        frequency_penalty?: number | null
+        /**
+         * Modify the likelihood of specified tokens appearing in the completion.  Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+         * @type {object}
+         * @memberof CreateChatCompletionRequest
+         */
+        logit_bias?: object | null
+        /**
+         * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+         * @type {string}
+         * @memberof CreateChatCompletionRequest
+         */
+        user?: string
+    }
+    /**
+     * @type CreateChatCompletionRequestStop
+     * Up to 4 sequences where the API will stop generating further tokens.
+     * @export
+     */
+    export declare type CreateChatCompletionRequestStop = Array<string> | string
+    /**
+     *
+     * @export
+     * @interface CreateChatCompletionResponse
+     */
+    export interface CreateChatCompletionResponse {
+        /**
+         *
+         * @type {string}
+         * @memberof CreateChatCompletionResponse
+         */
+        id: string
+        /**
+         *
+         * @type {string}
+         * @memberof CreateChatCompletionResponse
+         */
+        object: string
+        /**
+         *
+         * @type {number}
+         * @memberof CreateChatCompletionResponse
+         */
+        created: number
+        /**
+         *
+         * @type {string}
+         * @memberof CreateChatCompletionResponse
+         */
+        model: string
+        /**
+         *
+         * @type {Array<CreateChatCompletionResponseChoicesInner>}
+         * @memberof CreateChatCompletionResponse
+         */
+        choices: Array<CreateChatCompletionResponseChoicesInner>
+        /**
+         *
+         * @type {CreateCompletionResponseUsage}
+         * @memberof CreateChatCompletionResponse
+         */
+        usage?: CreateCompletionResponseUsage
+    }
+    /**
+     *
+     * @export
+     * @interface CreateChatCompletionResponseChoicesInner
+     */
+    export interface CreateChatCompletionResponseChoicesInner {
+        /**
+         *
+         * @type {number}
+         * @memberof CreateChatCompletionResponseChoicesInner
+         */
+        index?: number
+        /**
+         *
+         * @type {ChatCompletionResponseMessage}
+         * @memberof CreateChatCompletionResponseChoicesInner
+         */
+        message?: ChatCompletionResponseMessage
+        /**
+         *
+         * @type {string}
+         * @memberof CreateChatCompletionResponseChoicesInner
+         */
+        finish_reason?: string
+    }
+    /**
+     *
+     * @export
+     * @interface CreateCompletionResponseUsage
+     */
+    export interface CreateCompletionResponseUsage {
+        /**
+         *
+         * @type {number}
+         * @memberof CreateCompletionResponseUsage
+         */
+        prompt_tokens: number
+        /**
+         *
+         * @type {number}
+         * @memberof CreateCompletionResponseUsage
+         */
+        completion_tokens: number
+        /**
+         *
+         * @type {number}
+         * @memberof CreateCompletionResponseUsage
+         */
+        total_tokens: number
+    }
+}
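
To make the request shape concrete, here is an illustrative `CreateChatCompletionRequest` built from the types above; the field values are examples only:

import { openai } from './types'

// A minimal chat completion request matching the interface above.
const request: openai.CreateChatCompletionRequest = {
  model: 'gpt-3.5-turbo',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Write a haiku about TypeScript.' }
  ],
  temperature: 0.8,
  max_tokens: 256,
  stream: true
}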

+ 6 - 0
src/chatapi/utils.ts

@@ -0,0 +1,6 @@
+const uuidv4Re =
+  /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i
+
+export function isValidUUIDv4(str: string): boolean {
+  return !!str && uuidv4Re.test(str)
+}
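
`isValidUUIDv4` is presumably used to validate the conversation and message IDs passed to the backend API. For example:

import { isValidUUIDv4 } from './utils'

console.log(isValidUUIDv4('7c9e6679-7425-40de-944b-e07fc1f90ae7')) // true
console.log(isValidUUIDv4('not-a-uuid')) // false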

+ 15 - 0
src/openai/openai.controller.ts

@@ -0,0 +1,15 @@
+import { Controller, Post, Req, Res } from '@nestjs/common'
+import { ApiTags } from '@nestjs/swagger'
+import { OpenaiService } from './openai.service'
+
+@ApiTags('openai')
+@Controller('openai')
+export class OpenaiController {
+    constructor(private readonly openaiService: OpenaiService) {}
+
+    @Post('chat')
+    public async chat(@Req() req, @Res() res) {
+        await this.openaiService.chat(req, res)
+        res.send('chat')
+    }
+}
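
A hedged sketch of exercising the new route, assuming the default Nest bootstrap on port 3000 with no global prefix:

// POST /openai/chat with an empty JSON body.
const res = await fetch('http://localhost:3000/openai/chat', {
  method: 'POST',
  headers: { 'content-type': 'application/json' },
  body: JSON.stringify({})
})
console.log(res.status, await res.text()) // expect 200 "chat"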

+ 9 - 0
src/openai/openai.module.ts

@@ -0,0 +1,9 @@
+import { Module } from '@nestjs/common';
+import { OpenaiService } from './openai.service';
+import { OpenaiController } from './openai.controller';
+
+@Module({
+  providers: [OpenaiService],
+  controllers: [OpenaiController]
+})
+export class OpenaiModule {}
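
For the route to be reachable, `OpenaiModule` still has to be imported by the root module; a minimal sketch, assuming the conventional `AppModule` layout (the actual wiring may appear elsewhere in this commit):

import { Module } from '@nestjs/common'
import { OpenaiModule } from './openai/openai.module'

@Module({
  imports: [OpenaiModule] // registers OpenaiController under /openai
})
export class AppModule {}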

+ 21 - 0
src/openai/openai.service.ts

@@ -0,0 +1,21 @@
+import { Injectable } from '@nestjs/common'
+import { ChatGPTAPI } from 'src/chatapi'
+
+@Injectable()
+export class OpenaiService {
+    constructor() {}
+
+    public async chat(req, res) {
+        const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
+
+        const prompt = 'Write a python version of bubble sort. Do not include example usage.'
+
+        console.log(prompt)
+        const result = await api.sendMessage(prompt, {
+            onProgress: (partialResponse) => {
+                console.log(partialResponse.text)
+            }
+        })
+        console.log(result.text)
+    }
+}
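
Since the controller injects `@Res()`, a natural follow-up is streaming the partial responses to the client instead of logging them. A hedged sketch of such a `chat` method (not part of this diff; the controller would then stop writing to the response itself, and `req.body.prompt` is an assumed input shape):

import { Injectable } from '@nestjs/common'
import { ChatGPTAPI } from 'src/chatapi'

@Injectable()
export class OpenaiService {
    public async chat(req, res) {
        const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })

        res.setHeader('Content-Type', 'text/plain; charset=utf-8')

        await api.sendMessage(req.body?.prompt ?? 'Hello', {
            onProgress: (partialResponse) => {
                // `delta` carries only the newest tokens of a streamed reply.
                if (partialResponse.delta) {
                    res.write(partialResponse.delta)
                }
            }
        })

        res.end()
    }
}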

+ 47 - 1
yarn.lock

@@ -429,6 +429,11 @@
   dependencies:
     "@jridgewell/trace-mapping" "0.3.9"
 
+"@dqbd/tiktoken@^1.0.6":
+  version "1.0.6"
+  resolved "https://registry.npmmirror.com/@dqbd/tiktoken/-/tiktoken-1.0.6.tgz#96bfd0a4909726c61551a8c783493f01841bd163"
+  integrity sha512-umSdeZTy/SbPPKVuZKV/XKyFPmXSN145CcM3iHjBbmhlohBJg7vaDp4cPCW+xNlWL6L2U1sp7T2BD+di2sUKdA==
+
 "@eslint-community/eslint-utils@^4.2.0":
   version "4.4.0"
   resolved "https://registry.npmmirror.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59"
@@ -1696,6 +1701,15 @@ asynckit@^0.4.0:
   resolved "https://registry.npmmirror.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
   integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
 
+axios@^1.3.5:
+  version "1.3.5"
+  resolved "https://registry.npmmirror.com/axios/-/axios-1.3.5.tgz#e07209b39a0d11848e3e341fa087acd71dadc542"
+  integrity sha512-glL/PvG/E+xCWwV8S6nCHcrfg1exGx7vxyUIivIA1iL7BIh6bePylCfVHwp6k13ao7SATxB6imau2kqY+I67kw==
+  dependencies:
+    follow-redirects "^1.15.0"
+    form-data "^4.0.0"
+    proxy-from-env "^1.1.0"
+
 babel-jest@^29.5.0:
   version "29.5.0"
   resolved "https://registry.npmmirror.com/babel-jest/-/babel-jest-29.5.0.tgz#3fe3ddb109198e78b1c88f9ebdecd5e4fc2f50a5"
@@ -2650,6 +2664,11 @@ events@^3.2.0:
   resolved "https://registry.npmmirror.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400"
   integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==
 
+eventsource-parser@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.npmmirror.com/eventsource-parser/-/eventsource-parser-1.0.0.tgz#6332e37fd5512e3c8d9df05773b2bf9e152ccc04"
+  integrity sha512-9jgfSCa3dmEme2ES3mPByGXfgZ87VbP97tng1G2nWwWx6bV2nYxm2AWCrbQjXToSe+yYlqaZNtxffR9IeQr95g==
+
 execa@^4.0.2:
   version "4.1.0"
   resolved "https://registry.npmmirror.com/execa/-/execa-4.1.0.tgz#4e5491ad1572f2f17a77d388c6c857135b22847a"
@@ -2883,6 +2902,11 @@ flatted@^3.1.0:
   resolved "https://registry.npmmirror.com/flatted/-/flatted-3.2.7.tgz#609f39207cb614b89d0765b477cb2d437fbf9787"
   integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==
 
+follow-redirects@^1.15.0:
+  version "1.15.2"
+  resolved "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13"
+  integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==
+
 fork-ts-checker-webpack-plugin@8.0.0:
   version "8.0.0"
   resolved "https://registry.npmmirror.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-8.0.0.tgz#dae45dfe7298aa5d553e2580096ced79b6179504"
@@ -3962,6 +3986,11 @@ jsesc@^2.5.1:
   resolved "https://registry.npmmirror.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
   integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
 
+json-buffer@3.0.1:
+  version "3.0.1"
+  resolved "https://registry.npmmirror.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
+  integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==
+
 json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1:
   version "2.3.1"
   resolved "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d"
@@ -4040,6 +4069,13 @@ jws@^3.2.2:
     jwa "^1.4.1"
     safe-buffer "^5.0.1"
 
+keyv@^4.5.2:
+  version "4.5.2"
+  resolved "https://registry.npmmirror.com/keyv/-/keyv-4.5.2.tgz#0e310ce73bf7851ec702f2eaf46ec4e3805cce56"
+  integrity sha512-5MHbFaKn8cNSmVW7BYnijeAVlE4cYA/SVkifVgrh7yotnfhKmjuXpDKjrABLnT0SfHWV21P8ow07OGfRrNDg8g==
+  dependencies:
+    json-buffer "3.0.1"
+
 kitx@^2.0.0, kitx@^2.1.0:
   version "2.1.0"
   resolved "https://registry.npmmirror.com/kitx/-/kitx-2.1.0.tgz#fc7fbf78eb6ed7a5a3fd2d7afb3011e29d0e44c8"
@@ -4641,6 +4677,11 @@ p-locate@^5.0.0:
   dependencies:
     p-limit "^3.0.2"
 
+p-timeout@^6.1.1:
+  version "6.1.1"
+  resolved "https://registry.npmmirror.com/p-timeout/-/p-timeout-6.1.1.tgz#bcee5e37d730f5474d973b6ff226751a1a5e6ff1"
+  integrity sha512-yqz2Wi4fiFRpMmK0L2pGAU49naSUaP23fFIQL2Y6YT+qDGPoFwpvgQM/wzc6F8JoenUkIlAFa4Ql7NguXBxI7w==
+
 p-try@^2.0.0:
   version "2.2.0"
   resolved "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
@@ -4896,7 +4937,7 @@ proxy-agent@^5.0.0:
     proxy-from-env "^1.0.0"
     socks-proxy-agent "^5.0.0"
 
-proxy-from-env@^1.0.0:
+proxy-from-env@^1.0.0, proxy-from-env@^1.1.0:
   version "1.1.0"
   resolved "https://registry.npmmirror.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2"
   integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==
@@ -4938,6 +4979,11 @@ queue-microtask@^1.2.2:
   resolved "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243"
   integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
 
+quick-lru@^6.1.1:
+  version "6.1.1"
+  resolved "https://registry.npmmirror.com/quick-lru/-/quick-lru-6.1.1.tgz#f8e5bf9010376c126c80c1a62827a526c0e60adf"
+  integrity sha512-S27GBT+F0NTRiehtbrgaSE1idUAJ5bX8dPAQTdylEyNlrdcH5X4Lz7Edz3DYzecbsCluD5zO8ZNEe04z3D3u6Q==
+
 randombytes@2.0.3:
   version "2.0.3"
   resolved "https://registry.npmmirror.com/randombytes/-/randombytes-2.0.3.tgz#674c99760901c3c4112771a31e521dc349cc09ec"