Kongfha committed
Commit e9295c4 · 1 Parent(s): 1e777a2

Update app.py

Files changed (1)
  1. app.py +1 -40
app.py CHANGED
@@ -1,43 +1,4 @@
 import gradio as gr
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
-model_name = "Kongfha/PhraAphaiManee-LM"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
-
-nlp = pipeline("text-generation",
-               model=model,
-               tokenizer=tokenizer)
-
-def generate(input_sentence, max_length=140, top_k=50, temperature=1.0):
-    generated_text = nlp(input_sentence,
-                         max_length=max_length,
-                         do_sample=True,
-                         top_k=top_k,
-                         temperature=temperature)
-    return generated_text[0]['generated_text']
-
-inputs = [
-    gr.inputs.Textbox(label="Input Sentence"),
-    gr.inputs.Number(default=140, label="Max Length"),
-    gr.inputs.Number(default=50, label="Top K"),
-    gr.inputs.Slider(minimum=0.1, maximum=2.0, default=1.0, label="Temperature", step=0.1),
-]
-
-outputs = gr.outputs.Textbox(label="Generated Text")
-
-examples = [
-    ["๏ เรือล่อง", 100, 20, 0.5],
-    ["๏ แม้นชีวี", 120, 30, 0.7],
-    ["๏ หากวันใด", 140, 50, 1.0],
-    ["๏ หากจำเป็น", 160, 70, 1.5]
-]
-
-iface = gr.Interface(
-    fn=generate,
-    inputs=inputs,
-    outputs=outputs,
-    examples=examples
-)
-
-iface.launch()
+print("yeahhh")