Tonic committed on
Commit
c89f40c
·
1 Parent(s): 69d5fa8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -19
app.py CHANGED
@@ -21,26 +21,24 @@ qa_relevance = Feedback(feedback_openai.relevance, name="Answer Relevance").on_i
21
  # Create a Tru object
22
  tru = Tru()
23
 
24
- # Initialize the HuggingFacePipeline for local LLM
25
- local_llm = HuggingFacePipeline.from_model_id(
26
- model_id="Tonic/stablemed",
27
- task="text-generation",
28
- model_kwargs={"temperature": 0.2, "top_p": 0.95, "max_length": 256}
29
- )
30
-
31
  # Set the window memory to go back 4 turns
32
  window_memory = ConversationBufferWindowMemory(k=4)
33
 
34
- # Create the ConversationChain with the given window memory
 
 
 
 
 
 
35
  conversation = ConversationChain(
36
- llm=local_llm,
37
  verbose=True,
38
  memory=window_memory
39
  )
40
 
41
- # Update the conversation prompt template to prime it as a gardening expert
42
  conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
43
-
44
  Current conversation:
45
  {history}
46
  Human: {input}
@@ -49,17 +47,13 @@ AI:'''
49
  # Wrap the conversation with TruChain to instrument it
50
  tc_conversation = tru.Chain(conversation, app_id='Trulens-StableMed', feedbacks=[qa_relevance])
51
 
52
- # Initialize Gradio Client
53
- client = Client("https://tonic-stablemed-chat.hf.space/")
54
-
55
  # Make a prediction using the wrapped conversation
56
- result = client.predict(
57
- "Howdy!", # str in 'user_input' Textbox component
58
- "Howdy!", # str in 'system_prompt' Textbox component
59
- api_name="/predict"
60
- )
61
 
62
  # Print the result
63
  print(result)
64
 
 
65
  tru.run_dashboard()
 
21
  # Create a Tru object
22
  tru = Tru()
23
 
 
 
 
 
 
 
 
24
  # Set the window memory to go back 4 turns
25
  window_memory = ConversationBufferWindowMemory(k=4)
26
 
27
+ # Define a custom function to interact with the Gradio client
28
+ def gradio_client_interaction(prompt):
29
+ client = Client("https://tonic-stablemed-chat.hf.space/")
30
+ result = client.predict(prompt, prompt, api_name="/predict")
31
+ return result['data'][0] # Assuming the response format
32
+
33
+ # Create the ConversationChain with the custom function
34
  conversation = ConversationChain(
35
+ llm=gradio_client_interaction,
36
  verbose=True,
37
  memory=window_memory
38
  )
39
 
40
+ # Update the conversation prompt template
41
  conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
 
42
  Current conversation:
43
  {history}
44
  Human: {input}
 
47
  # Wrap the conversation with TruChain to instrument it
48
  tc_conversation = tru.Chain(conversation, app_id='Trulens-StableMed', feedbacks=[qa_relevance])
49
 
 
 
 
50
  # Make a prediction using the wrapped conversation
51
+ user_input = "Howdy!"
52
+ system_prompt = "Howdy!"
53
+ result = tc_conversation.generate(user_input, system_prompt=system_prompt)
 
 
54
 
55
  # Print the result
56
  print(result)
57
 
58
+ # Run the TruLens dashboard
59
  tru.run_dashboard()