ldhldh committed on
Commit 98e337c • 1 Parent(s): 8625f54

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -40,10 +40,10 @@ llm = Llama(model_path = 'Llama-2-ko-7B-chat-gguf-q4_0.bin',
   )
 # App code
 def chat(x):
-  prom = f"다음은 A와 B의 역할극이야. 너는 B야. A와 대화하고 있어. 친구에게 친근하고 간결하게 잘 대답해줘.\n\n### A:\n{x}\n\n### B:\n"
-  output = llm(prom, max_tokens=20, stop=["###"], echo=True)
-  return output['choices'][0]['text'][len(prom):-1]
-
+  #prom = f"다음은 A와 B의 역할극이야. 너는 B야. A와 대화하고 있어. 친구에게 친근하고 간결하게 잘 대답해줘.\n\n### A:\n{x}\n\n### B:\n"
+  #output = llm(prom, max_tokens=20, stop=["###"], echo=True)
+  #return output['choices'][0]['text'][len(prom):-1]
+  return "AI 응답입니다."
 
 with gr.Blocks() as demo:
   count = 0
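
The change stubs out the model call: the Korean role-play prompt (roughly, "The following is a role-play between A and B. You are B. You are talking with A. Answer your friend in a friendly, concise way.") and the llama-cpp-python completion are commented out, and chat() now returns the fixed string "AI 응답입니다." ("This is an AI response."). For reference, below is a minimal sketch of how the original chat path fits into the surrounding app; the Gradio Blocks wiring (msg, out, btn) and the n_ctx value are assumptions for illustration, not part of this commit.

# Minimal sketch, not the full app.py: reconstructs the chat path this hunk
# touches plus a plausible Gradio UI around it. The Blocks widgets (msg, out,
# btn) and n_ctx=512 are assumptions for illustration only.
import gradio as gr
from llama_cpp import Llama

llm = Llama(model_path='Llama-2-ko-7B-chat-gguf-q4_0.bin',  # quantized GGUF weights
            n_ctx=512)                                      # context length: assumed value

# App code
def chat(x):
    # Prompt (Korean): "The following is a role-play between A and B. You are B.
    # You are talking with A. Answer your friend in a friendly, concise way."
    prom = f"다음은 A와 B의 역할극이야. 너는 B야. A와 대화하고 있어. 친구에게 친근하고 간결하게 잘 대답해줘.\n\n### A:\n{x}\n\n### B:\n"
    # echo=True returns prompt + completion, so the prompt prefix is sliced off;
    # [:-1] also drops the last character, mirroring the original code.
    output = llm(prom, max_tokens=20, stop=["###"], echo=True)
    return output['choices'][0]['text'][len(prom):-1]

with gr.Blocks() as demo:
    msg = gr.Textbox(label="A")   # hypothetical input box
    out = gr.Textbox(label="B")   # hypothetical output box
    btn = gr.Button("Send")
    btn.click(chat, inputs=msg, outputs=out)

demo.launch()

Reverting the commit is just the inverse of the hunk: uncomment the three original lines and drop the placeholder return.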