AThirumoorthi commited on
Commit
34ff726
·
verified ·
1 Parent(s): f37aba1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -26
app.py CHANGED
@@ -1,35 +1,74 @@
1
  import os
 
2
  import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
 
7
- OPENAI_API_KEY="[REDACTED — hard-coded API key removed from history; rotate/revoke this credential]"
8
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
 
9
 
10
- template = """You are a helpful assistant to answer all user queries.
11
- {chat_history}
12
- User: {user_message}
13
- Chatbot:"""
 
 
14
 
15
- prompt = PromptTemplate(
16
- input_variables=["chat_history", "user_message"], template=template
17
- )
18
 
19
- memory = ConversationBufferMemory(memory_key="chat_history")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- llm_chain = LLMChain(
22
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
23
- prompt=prompt,
24
- verbose=True,
25
- memory=memory,
26
- )
27
-
28
- def get_text_response(user_message,history):
29
- response = llm_chain.predict(user_message = user_message)
30
- return response
31
-
32
- demo = gr.ChatInterface(get_text_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  if __name__ == "__main__":
35
- demo.launch()
 
import os
from groq import Groq
import gradio as gr
import logging

# Set up logging.
# Log level is configurable via LOG_LEVEL (e.g. "INFO", "WARNING");
# defaults to DEBUG to preserve the original behavior.
logging.basicConfig(level=os.environ.get("LOG_LEVEL", "DEBUG"))
logger = logging.getLogger(__name__)

# Initialize the Groq client.
# Fail fast at import time: without a key every request would fail anyway,
# and a clear startup error is easier to diagnose than a runtime 401.
api_key = os.environ.get("GROQ_API_KEY")
if not api_key:
    logger.error("GROQ_API_KEY environment variable is not set.")
    raise ValueError("GROQ_API_KEY environment variable is required.")
client = Groq(api_key=api_key)

# Model is overridable via the MODEL_NAME env var; llama3-8b-8192 is the default.
MODEL_NAME = os.environ.get("MODEL_NAME", "llama3-8b-8192")
 
 
18
 
19
+ # Define a function to handle chat completions
20
+ def get_completion(user_input):
21
+ try:
22
+ completion = client.chat.completions.create(
23
+ model=MODEL_NAME,
24
+ messages=[
25
+ {"role": "system", "content": "You are a helpful assistant."},
26
+ {"role": "user", "content": user_input}
27
+ ],
28
+ temperature=1,
29
+ max_tokens=1024,
30
+ top_p=1,
31
+ stream=True,
32
+ stop=None,
33
+ )
34
+
35
+ response = ""
36
+ for chunk in completion:
37
+ response += chunk.choices[0].delta.content or ""
38
+
39
+ return response
40
+ except Exception as e:
41
+ logger.error(f"Error during completion: {e}")
42
+ return "Sorry, I encountered an error while processing your request."
43
 
44
# Launch Gradio interface
def launch_interface():
    """Build the Gradio UI around get_completion and start the server."""
    # Build the input/output widgets up front so the Interface call stays short.
    query_box = gr.Textbox(
        label="Enter your query:",
        placeholder="Ask me anything...",
        lines=2,
        max_lines=5,
        show_label=True,
        interactive=True
    )
    answer_box = gr.Textbox(
        label="Response:",
        interactive=False,
        show_label=True,
        lines=6,
        max_lines=10
    )

    demo = gr.Interface(
        fn=get_completion,
        inputs=query_box,
        outputs=answer_box,
        title="Mr AI",
        description="Ask anything and get a helpful response.",
        theme="default",
        css=".gr-box { border-radius: 10px; border: 1px solid #ccc; padding: 10px; }",
        allow_flagging="never"
    )

    logger.info("Starting Gradio interface")
    demo.launch(share=True)
72
 
73
# Script entry point: start the Gradio UI only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    launch_interface()