MatteoScript committed
Commit a061413 · 1 Parent(s): 273a659

Update main.py
Files changed (1): main.py (+9 -3)
main.py CHANGED
@@ -18,6 +18,10 @@ def read_root(input):
     generated_response = next(generate(input, history))  # Get the generated response
     return {"response": generated_response}  # Return the generated response as JSON
 
+@app.get("/")
+def read_general():
+    return {"response": "Benvenuto. Per maggiori info vai a /docs"}  # Return the welcome message as JSON
+
 def generate(prompt, history, temperature=0.2, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0):
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -35,7 +39,9 @@ def generate(prompt, history, temperature=0.2, max_new_tokens=30000, top_p=0.95,
     formatted_prompt = format_prompt(prompt, history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
 
-    # Get the complete output using next() on the iterator
-    generated_output = next(stream).token.text
+    # Accumulate the output in a list
+    output_list = []
+    for response in stream:
+        output_list.append(response.token.text)
 
-    return generated_output  # Return the whole generated sequence
+    return iter(output_list)  # Return the list as an iterator
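
For context, a minimal sketch of the pattern this commit lands on: drain the token stream into a list, then return an iterator over it so that next(generate(input, history)) in read_root keeps working. The Hugging Face client is replaced here by a stub; fake_stream, _Token and _Chunk are hypothetical names for illustration, and only generate's accumulation loop and the new "/" route come from the diff itself.

from fastapi import FastAPI

app = FastAPI()

# Hypothetical stand-ins for the chunks yielded by
# client.text_generation(..., stream=True, details=True); each chunk
# exposes its generated text as .token.text, as the diff assumes.
class _Token:
    def __init__(self, text):
        self.text = text

class _Chunk:
    def __init__(self, text):
        self.token = _Token(text)

def fake_stream(prompt):
    # Stub stream that yields a few one-token chunks.
    for piece in ["Benvenuto", ",", " mondo"]:
        yield _Chunk(piece)

def generate(prompt, history):
    stream = fake_stream(prompt)
    # Same accumulation as the commit: collect every streamed token,
    # then hand back an iterator over the collected pieces.
    output_list = []
    for response in stream:
        output_list.append(response.token.text)
    return iter(output_list)

@app.get("/")
def read_general():
    return {"response": "Benvenuto. Per maggiori info vai a /docs"}

if __name__ == "__main__":
    # next() mirrors read_root's call and yields only the first token;
    # joining the iterator recovers the full generation.
    print(next(generate("ciao", [])))     # Benvenuto
    print("".join(generate("ciao", [])))  # Benvenuto, mondo

Note that because read_root still consumes the result with next(...), the endpoint responds with only the first accumulated token, much like the next(stream).token.text it replaces; returning "".join(output_list) instead would surface the whole generated sequence.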