PhantHive committed on
Commit
8f596ab
·
1 Parent(s): 3111c2c

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -26
app.py DELETED
@@ -1,26 +0,0 @@
1
import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model once when the script starts.
# NOTE(review): the original code also fetched PeftConfig.from_pretrained(
# "PhantHive/bigbrain") into an unused `config` variable — removed, since it
# was never read and cost an extra network round-trip at startup.
model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
# Wrap the base model with the PEFT adapter weights so the fine-tuned
# behavior is applied on top of the base checkpoint.
model = PeftModel.from_pretrained(model, "PhantHive/bigbrain")

# Tokenizer must match the base model checkpoint, not the adapter.
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
13
-
14
-
15
def greet(text):
    """Generate a text completion for *text* with the adapted model.

    The user input is wrapped in the prompt template the adapter expects,
    and up to 50 new tokens are sampled from the model.
    """
    inputs = tokenizer(f"'{text}' ->: ", return_tensors='pt')

    # Inference only — gradient tracking is unnecessary overhead here.
    with torch.no_grad():
        generated = model.generate(**inputs, do_sample=True, max_new_tokens=50)

    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    return decoded
23
-
24
-
25
# Expose `greet` through a minimal text-in/text-out Gradio web UI.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()