Tonic committed on
Commit
5794f93
·
1 Parent(s): 27fb0be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -3
app.py CHANGED
@@ -9,6 +9,9 @@ from langchain.chains import ConversationChain
9
  from langchain.chains.conversation.memory import ConversationBufferWindowMemory
10
  import os
11
 
 
 
 
12
  # Access environment variables
13
  openai_api_key = os.environ.get("OPENAI_API_KEY")
14
  huggingface_api_token = os.environ.get("HUGGINGFACE_API_TOKEN")
@@ -30,13 +33,24 @@ def gradio_client_interaction(prompt):
30
  result = client.predict(prompt, prompt, api_name="/predict")
31
  return result['data'][0] # Assuming the response format
32
 
33
- # Create the ConversationChain with the custom function
 
 
 
 
 
 
 
 
 
 
 
 
34
  conversation = ConversationChain(
35
- llm=gradio_client_interaction,
36
  verbose=True,
37
  memory=window_memory
38
  )
39
-
40
  # Update the conversation prompt template
41
  conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
42
  Current conversation:
 
9
  from langchain.chains.conversation.memory import ConversationBufferWindowMemory
10
  import os
11
 
12
+ from langchain.llms import Runnable
13
+
14
+
15
  # Access environment variables
16
  openai_api_key = os.environ.get("OPENAI_API_KEY")
17
  huggingface_api_token = os.environ.get("HUGGINGFACE_API_TOKEN")
 
33
  result = client.predict(prompt, prompt, api_name="/predict")
34
  return result['data'][0] # Assuming the response format
35
 
36
+ class GradioLLM(Runnable):
37
+ def __init__(self, client_url):
38
+ self.client = Client(client_url)
39
+
40
+ def generate(self, prompt, **kwargs):
41
+ # Assuming the API expects 'prompt' and 'system_prompt' as inputs
42
+ result = self.client.predict(prompt, prompt, api_name="/predict")
43
+ return result['data'][0] # Adjust based on the actual response format
44
+
45
+ # Initialize the GradioLLM with the URL
46
+ gradio_llm = GradioLLM("https://tonic-stablemed-chat.hf.space/")
47
+
48
+ # Create the ConversationChain with the GradioLLM
49
  conversation = ConversationChain(
50
+ llm=gradio_llm,
51
  verbose=True,
52
  memory=window_memory
53
  )
 
54
  # Update the conversation prompt template
55
  conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
56
  Current conversation: