gaelhuser committed
Commit 1cc6b59 · verified · 1 parent: 39e2ed3

Update app.py


change to groq

Files changed (1): app.py (+16, -8)
app.py CHANGED
@@ -6,6 +6,9 @@
 
 #demo = gr.load("gaelhuser/patent_gen_prv", hf_token=HF_Hub_API_token, src="spaces")
 #demo.launch()
+HF_Hub_API_token = os.environ.get('HF_Hub_API_token', None)
+GROQ_API_KEY = os.environ.get('GROQ_API_KEY', None)
+
 
 import gradio as gr
 
@@ -19,14 +22,19 @@ import os
 HF_Hub_API_token = os.environ.get('HF_Hub_API_token', None)
 github_token = os.environ.get('github_token', None)
 
-llm = HuggingFaceEndpoint(
-    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
-    task="text-generation",
-    max_new_tokens=2048,
-    do_sample=False,
-    repetition_penalty=1.03,
-    huggingfacehub_api_token=HF_Hub_API_token
-)
+#llm = HuggingFaceEndpoint(
+#    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
+#    task="text-generation",
+#    max_new_tokens=2048,
+#    do_sample=False,
+#    repetition_penalty=1.03,
+#    huggingfacehub_api_token=HF_Hub_API_token
+#)
+
+from langchain_groq import ChatGroq
+os.environ['GROQ_API_KEY'] = GROQ_API_KEY
+llm = ChatGroq(model="llama-3.1-8b-instant", max_tokens=2048, temperature=0.0)
+
 
 memory = ConversationBufferMemory()
 conversation = ConversationChain(llm=llm, verbose=False, memory=memory)
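
For context, a minimal self-contained sketch of the Groq-backed chain this commit switches to. It assumes langchain and langchain-groq are installed, that GROQ_API_KEY is set in the environment (e.g. as a Space secret), and that ConversationBufferMemory and ConversationChain come from langchain; those imports sit outside this diff, and the example prompt is hypothetical.

# Minimal sketch of the Groq-backed conversation chain (assumptions noted above).
import os

from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_groq import ChatGroq

# GROQ_API_KEY must already be set in the environment.
assert os.environ.get("GROQ_API_KEY"), "GROQ_API_KEY is not set"

# Groq-hosted Llama 3.1 8B Instant replaces the Mistral-7B-Instruct HF endpoint;
# max_tokens mirrors the old max_new_tokens, temperature=0.0 roughly mirrors do_sample=False.
llm = ChatGroq(model="llama-3.1-8b-instant", max_tokens=2048, temperature=0.0)

memory = ConversationBufferMemory()
conversation = ConversationChain(llm=llm, verbose=False, memory=memory)

# Hypothetical usage: one turn through the chain.
print(conversation.predict(input="Draft a one-sentence summary of claim 1."))

The memory and chain wiring is unchanged by the commit; only the llm object is swapped, with the old HuggingFaceEndpoint block kept in app.py as a commented-out reference.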