sushantgo committed
Commit f90c66a · 1 Parent(s): fcd87d3

Upload folder using huggingface_hub

Files changed (5)
  1. .env +1 -0
  2. README.md +2 -8
  3. chat-app.py +24 -0
  4. gradio-chat-app.py +37 -0
  5. langchain.py +28 -0
.env ADDED
@@ -0,0 +1 @@
+ API_KEY = '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'
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: Simple Gradio Langchain App
- emoji: 🌖
- colorFrom: green
- colorTo: green
+ title: Simple_Gradio_Langchain_App
+ app_file: langchain.py
  sdk: gradio
  sdk_version: 3.46.0
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
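For reference, once this hunk is applied the Space front matter reduces to the keys Spaces reads to pick the SDK, the SDK version, and the entry-point script:

---
title: Simple_Gradio_Langchain_App
app_file: langchain.py
sdk: gradio
sdk_version: 3.46.0
---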
 
 
chat-app.py ADDED
@@ -0,0 +1,24 @@
+ import openai
+ import os
+ from dotenv import load_dotenv
+
+ openai.api_key = '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'
+ message_history = []
+
+ def chat(userInput, role = 'user'):
+     message_history.append({'role': role, 'content': userInput})
+     completion = openai.ChatCompletion.create(
+         model = 'gpt-3.5-turbo',
+         messages = message_history
+     )
+     replContent = completion.choices[0].message.content
+     print(replContent)
+     message_history.append({'role': 'assistant', 'content': replContent })
+     return replContent
+
+ for i in range(2):
+     userInput = input('> :')
+     print(userInput)
+     print()
+     chat(userInput)
+     print()
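chat-app.py imports os and python-dotenv but never calls them, hardcoding the key instead. A minimal sketch (not part of the commit) of how the committed .env could supply the key, assuming the API_KEY name defined above:

import os
import openai
from dotenv import load_dotenv

load_dotenv()                          # load variables from the committed .env
openai.api_key = os.getenv('API_KEY')  # 'API_KEY' is the name used in .env above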
gradio-chat-app.py ADDED
@@ -0,0 +1,37 @@
+ import gradio as gr
+ import openai
+
+ openai.api_key = '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'
+ message_history = []
+
+ # message_history = [
+ #     {
+ #         "role": "user",
+ #         "content": f"You are a joke bot, but I'll specify a subject matter in messages, and you'll reply with a jokes that includes the subjects I mention in my messages. Reply only with jokes to further input, If you understand, say OK."},
+ #     {
+ #         "role": "assistant",
+ #         "content": f"OK"
+ #     }
+ # ]
+
+ def predict(input):
+     global message_history
+     message_history.append({'role': 'user', 'content': input})
+     completion = openai.ChatCompletion.create(
+         model = 'gpt-3.5-turbo',
+         messages = message_history
+     )
+     replContent = completion.choices[0].message.content
+     print(replContent)
+     message_history.append({'role': 'assistant', 'content': replContent })
+     response = [(message_history[i]['content'], message_history[i+1]['content']) for i in range(0, len(message_history)-1, 2)]
+     return response
+
+ with gr.Blocks() as d:
+     chatbot = gr.Chatbot()
+     with gr.Row():
+         textbox = gr.Textbox(show_label = False, placeholder = "Type your message here").style(container = False)
+     textbox.submit(predict, textbox, chatbot)
+     textbox.submit(None, None, textbox, _js = "() => {''}")
+
+ d.launch()
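The list comprehension in predict() folds the flat message_history into (user message, assistant reply) tuples, which is the pairing gr.Chatbot displays in Gradio 3.x. A standalone sketch of just that step, with made-up messages:

# Hypothetical history in the same alternating user/assistant layout predict() maintains.
message_history = [
    {'role': 'user', 'content': 'Hi'},
    {'role': 'assistant', 'content': 'Hello! How can I help?'},
    {'role': 'user', 'content': 'Tell me a joke about ducks'},
    {'role': 'assistant', 'content': 'Why do ducks have tail feathers? To cover their buttquacks.'},
]

# Same expression as in predict(): step through the list two entries at a time.
pairs = [(message_history[i]['content'], message_history[i + 1]['content'])
         for i in range(0, len(message_history) - 1, 2)]
print(pairs)
# [('Hi', 'Hello! How can I help?'),
#  ('Tell me a joke about ducks', 'Why do ducks have tail feathers? To cover their buttquacks.')]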
langchain.py ADDED
@@ -0,0 +1,28 @@
+ import os
+ import gradio as gr
+ from dotenv import load_dotenv, find_dotenv
+ _ = load_dotenv(find_dotenv())
+
+ from langchain.chains import ConversationChain
+ from langchain.chat_models import ChatOpenAI
+ from langchain.memory import ConversationBufferMemory
+
+ llm = ChatOpenAI(temperature=0.0)
+ memory = ConversationBufferMemory()
+ conversion = ConversationChain(
+     llm=llm,
+     memory=memory,
+     verbose=False
+ )
+
+ def takeinput(name):
+     output_str = conversion.predict(input=name)
+     return output_str
+
+ demo = gr.Interface(
+     fn=takeinput,
+     inputs=["text"],
+     outputs=["text"]
+ )
+
+ demo.launch()
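langchain.py is the app_file the README now points at, and its ConversationBufferMemory keeps every prior turn in the prompt, so follow-up inputs see earlier ones. A minimal sketch (not from the commit) of exercising the chain outside Gradio; it assumes the key is exported as OPENAI_API_KEY, the environment variable ChatOpenAI checks, rather than the API_KEY name used in the committed .env:

from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

# Same construction as langchain.py, just under a different variable name.
chain = ConversationChain(llm=ChatOpenAI(temperature=0.0),
                          memory=ConversationBufferMemory())

print(chain.predict(input="My name is Sam."))
print(chain.predict(input="What is my name?"))  # the buffer memory carries the first turn into this call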