Tonic committed on
Commit
69d5fa8
·
1 Parent(s): 049b37d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -3,8 +3,8 @@ from trulens_eval.schema import Select
3
  from trulens_eval.tru import Tru
4
  from trulens_eval.feedback import Feedback
5
  from trulens_eval.feedback import OpenAI as Feedback_OpenAI
6
- from langchain import HuggingFacePipeline
7
  from langchain.llms import HuggingFacePipeline
 
8
  from langchain.chains import ConversationChain
9
  from langchain.chains.conversation.memory import ConversationBufferWindowMemory
10
  import os
@@ -23,9 +23,9 @@ tru = Tru()
23
 
24
  # Initialize the HuggingFacePipeline for local LLM
25
  local_llm = HuggingFacePipeline.from_model_id(
26
- model_id="chavinlo/alpaca-native",
27
  task="text-generation",
28
- model_kwargs={"temperature": 0.6, "top_p": 0.95, "max_length": 256}
29
  )
30
 
31
  # Set the window memory to go back 4 turns
@@ -39,7 +39,7 @@ conversation = ConversationChain(
39
  )
40
 
41
  # Update the conversation prompt template to prime it as a gardening expert
42
- conversation.prompt.template = '''The following is a friendly conversation between a human and an AI gardening expert. The AI is an expert on gardening and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
43
 
44
  Current conversation:
45
  {history}
@@ -47,7 +47,7 @@ Human: {input}
47
  AI:'''
48
 
49
  # Wrap the conversation with TruChain to instrument it
50
- tc_conversation = tru.Chain(conversation, app_id='GardeningAIwithMemory_v1', feedbacks=[qa_relevance])
51
 
52
  # Initialize Gradio Client
53
  client = Client("https://tonic-stablemed-chat.hf.space/")
 
3
  from trulens_eval.tru import Tru
4
  from trulens_eval.feedback import Feedback
5
  from trulens_eval.feedback import OpenAI as Feedback_OpenAI
 
6
  from langchain.llms import HuggingFacePipeline
7
+ from langchain.prompts import PromptTemplate
8
  from langchain.chains import ConversationChain
9
  from langchain.chains.conversation.memory import ConversationBufferWindowMemory
10
  import os
 
23
 
24
  # Initialize the HuggingFacePipeline for local LLM
25
  local_llm = HuggingFacePipeline.from_model_id(
26
+ model_id="Tonic/stablemed",
27
  task="text-generation",
28
+ model_kwargs={"temperature": 0.2, "top_p": 0.95, "max_length": 256}
29
  )
30
 
31
  # Set the window memory to go back 4 turns
 
39
  )
40
 
41
  # Update the conversation prompt template to prime it as a gardening expert
42
+ conversation.prompt.template = '''The following is a consult between a clinical consultant and a public health and medical expert. The AI is an expert on medicine and public health and gives recommendations specific to location and conditions. If the AI does not know the answer to a question, it truthfully says it does not know.
43
 
44
  Current conversation:
45
  {history}
 
47
  AI:'''
48
 
49
  # Wrap the conversation with TruChain to instrument it
50
+ tc_conversation = tru.Chain(conversation, app_id='Trulens-StableMed', feedbacks=[qa_relevance])
51
 
52
  # Initialize Gradio Client
53
  client = Client("https://tonic-stablemed-chat.hf.space/")