xsa-dev committed on
Commit effb069 · 1 Parent(s): 637df7e
Files changed (2)
  1. app.py +17 -15
  2. requeirements.txt +2 -0
app.py CHANGED
@@ -1,17 +1,19 @@
  import gradio as gr
- from transformers import AutoModel, pipeline
- from transformers import AutoTokenizer, AutoModelForCausalLM
 
-
-
- pipe = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")
-
- # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
- # model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
-
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ title = "LLAMA2"
+ description = "LLAMA2"
+ article = "<p style='text-align: center'>test ...</p>"
+ examples = [
+     ["Test message 1"],
+     ["What can you do?"],
+     ["The smooth Borealis basin in the northern hemisphere occupies 40%"],
+ ]
+ gr.Interface.load(
+     "huggingface/meta-llama/Llama-2-7b-chat-hf",
+     inputs=gr.Textbox(lines=5, label="Input text"),
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+     enable_queue=True,
+ ).launch()
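
For context, the rewritten app.py no longer runs the model locally: gr.Interface.load("huggingface/...") proxies requests to the hosted Inference API. On recent Gradio releases both gr.Interface.load and the enable_queue argument are deprecated, so a roughly equivalent sketch for a newer Gradio version (an assumption, not part of this commit: gr.load with a "models/" prefix, queuing via .queue(), and a hypothetical HF_TOKEN environment variable for the gated Llama 2 checkpoint) might look like:

    import os
    import gradio as gr

    # Sketch only: gr.load is the newer replacement for gr.Interface.load and
    # builds a demo backed by the hosted Inference API for a "models/" repo.
    # HF_TOKEN is a hypothetical secret, needed because the Llama 2 repo is gated.
    demo = gr.load(
        "models/meta-llama/Llama-2-7b-chat-hf",
        hf_token=os.environ.get("HF_TOKEN"),
    )
    demo.queue().launch()  # .queue() replaces the old enable_queue=True flag

The exact call names depend on the Gradio version pinned for the Space, so treat this as a sketch rather than a drop-in replacement.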
requeirements.txt ADDED
@@ -0,0 +1,2 @@
+ transformers
+ torch
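
Note on the dependency file (an observation, not part of the commit): Hugging Face Spaces resolves Python dependencies from a file named exactly requirements.txt, and since the new app.py delegates inference to the hosted Inference API, transformers and torch are only needed when the model is loaded locally, as the removed pipeline(...) code did. A hypothetical requirements.txt for that local path would read:

    # requirements.txt (hypothetical rename of requeirements.txt; contents unchanged)
    transformers
    torch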