camparchimedes committed
Commit 4322daa · verified · 1 parent: 7f4c041

Update app.py

Files changed (1):
    app.py (+17 -12)
app.py CHANGED
@@ -21,6 +21,8 @@ from langchain.chains import LLMChain, APIChain
 from langchain_core.prompts import PromptTemplate
 from langchain.memory.buffer import ConversationBufferMemory
 
+from langchain_openai import OpenAI
+
 from langchain_community.llms import HuggingFaceHub
 from langchain_huggingface import HuggingFacePipeline
 from langchain_huggingface import HuggingFaceEndpoint
@@ -94,22 +96,25 @@ api_response_prompt = PromptTemplate(
 
 @cl.on_chat_start
 def setup_multiple_chains():
+
+    llm = OpenAI(model='gpt-3.5-turbo-instruct',
+                 temperature=0.7)
 
-    llm = HuggingFaceEndpoint(
+    #llm = HuggingFaceEndpoint(
     #repo_id="google/gemma-2-2b", #"norallm/normistral-7b-warm-instruct",
     #endpoint_url="http://localhost:8010/",
-    model="google/gemma-2-2b",
-    max_new_tokens=512,
-    top_k=10,
-    top_p=0.95,
-    typical_p=0.95,
-    temperature=0.7,
-    repetition_penalty=1.03,
-    huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
-    task="text-generation"
-    )
+
+    #model="google/gemma-2-2b",
+    #max_new_tokens=512,
+    #top_k=10,
+    #top_p=0.95,
+    #typical_p=0.95,
+    #temperature=0.7,
+    #repetition_penalty=1.03,
+    #huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
+    #task="text-generation"
+    #)
 
-
     #llm = HuggingFacePipeline.from_model_id(
     #model_id="normistral-7b-warm-instruct",
     #task="text-generation",