Hjgugugjhuhjggg committed on
Commit
4c939b4
1 Parent(s): 27f1e78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -7,11 +7,10 @@ from langchain_community.llms import VLLM
7
  from gptcache import Cache
8
  from gptcache.manager.factory import manager_factory
9
  from gptcache.processor.pre import get_prompt
10
- from langchain_community.cache import GPTCache
11
  from sklearn.metrics.pairwise import cosine_similarity
12
  from sentence_transformers import SentenceTransformer
13
  import torch
14
- from langchain.callbacks import get_openai_callback
15
  import langchain
16
 
17
  app = FastAPI()
@@ -24,7 +23,6 @@ def init_gptcache(cache_obj, llm):
24
  cache_obj.init(pre_embedding_func=get_prompt, data_manager=manager_factory(manager="map", data_dir=f"map_cache_{hashed_llm}"))
25
 
26
  cache = Cache()
27
- langchain.llm_cache = GPTCache(cache=cache)
28
 
29
  hf_token = os.environ.get("HF_TOKEN")
30
 
@@ -38,6 +36,8 @@ llm_models = {
38
  for llm_name, llm in llm_models.items():
39
  init_gptcache(cache, llm_name)
40
 
 
 
41
  try:
42
  sentence_model = SentenceTransformer('all-mpnet-base-v2', device='cpu')
43
  except Exception as e:
@@ -91,4 +91,4 @@ async def generateText(request: Request):
91
  return JSONResponse({"best_model": best_model, "text": best_response, "all_responses": all_responses})
92
 
93
  if __name__ == "__main__":
94
- uvicorn.run(app, host="0.0.0.0", port=5001)
 
7
  from gptcache import Cache
8
  from gptcache.manager.factory import manager_factory
9
  from gptcache.processor.pre import get_prompt
10
+ from langchain.callbacks import get_openai_callback
11
  from sklearn.metrics.pairwise import cosine_similarity
12
  from sentence_transformers import SentenceTransformer
13
  import torch
 
14
  import langchain
15
 
16
  app = FastAPI()
 
23
  cache_obj.init(pre_embedding_func=get_prompt, data_manager=manager_factory(manager="map", data_dir=f"map_cache_{hashed_llm}"))
24
 
25
  cache = Cache()
 
26
 
27
  hf_token = os.environ.get("HF_TOKEN")
28
 
 
36
  for llm_name, llm in llm_models.items():
37
  init_gptcache(cache, llm_name)
38
 
39
+ langchain.llm_cache = langchain.cache.GPTCache(session=cache)
40
+
41
  try:
42
  sentence_model = SentenceTransformer('all-mpnet-base-v2', device='cpu')
43
  except Exception as e:
 
91
  return JSONResponse({"best_model": best_model, "text": best_response, "all_responses": all_responses})
92
 
93
  if __name__ == "__main__":
94
+ uvicorn.run(app, host="0.0.0.0", port=7860)