acbdkk committed on
Commit
15b386c
·
1 Parent(s): 28ed4b3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -0
app.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import AutoTokenizer
from petals import AutoDistributedModelForCausalLM
import streamlit as st

# Choose any model available at https://health.petals.dev
MODEL_NAME = "petals-team/StableBeluga2"  # This one is fine-tuned Llama 2 (70B)

SYSTEM_PROMPT = (
    "### System:\nYou are Stable Beluga, an AI that is very precise. "
    "Be as accurate as you can.\n\n"
)


@st.cache_resource
def _load_model_and_tokenizer(model_name):
    """Load the tokenizer and connect to the distributed Petals network.

    Streamlit reruns this whole script on every user interaction; without
    @st.cache_resource the tokenizer download and the network connection
    would be repeated for every message. Caching keeps one shared instance
    per model name for the lifetime of the server process.

    Args:
        model_name: Hugging Face model id hosted on the Petals network.

    Returns:
        (tokenizer, model) tuple ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Connect to a distributed network hosting model layers
    model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model


tokenizer, model = _load_model_and_tokenizer(MODEL_NAME)

message = st.chat_input('Message')
if message:
    # Assemble the Stable Beluga chat template around the user's message.
    prompt = f"{SYSTEM_PROMPT}### User: {message}\n\n### Assistant:\n"
    # Run the model as if it were on your computer
    inputs = tokenizer(prompt, return_tensors="pt")["input_ids"]
    outputs = model.generate(inputs, max_new_tokens=256)
    # NOTE(review): [3:-4] presumably strips the leading "<s> " and trailing
    # "</s>" special-token text from the decoded string — fragile if the
    # tokenizer's special tokens change; tokenizer.decode(...,
    # skip_special_tokens=True) would be the robust form, but it alters the
    # displayed output, so the original slice is kept. TODO confirm.
    st.write(tokenizer.decode(outputs[0])[3:-4])