from fastapi import FastAPI
from huggingface_hub import InferenceClient

app = FastAPI()
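# InferenceClient sends requests to the Hugging Face Inference API for the given model ID.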
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

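# Builds a Mistral/Mixtral instruction prompt: each past exchange is wrapped in
# [INST] ... [/INST] tags, with the assistant reply terminated by </s>.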
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

@app.get("/Genera")
def read_root(input: str):
    history = []  # Define the conversation history here if needed
    generated_response = generate(input, history)  # generate() returns the full string, so no next() call is needed
    return {"response": generated_response}  # Return the generated response as JSON

def generate(prompt, history, temperature=0.2, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0):
    # Clamp temperature away from zero, since sampling requires a positive value.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)

    # Accumulate every streamed token: a single next() call would return only
    # the first token, not the complete output.
    generated_output = ""
    for response in stream:
        generated_output += response.token.text

    return generated_output  # Return the entire generated sequence
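
# A minimal usage sketch (assumptions: this module is saved as main.py and is
# served with uvicorn, the usual way to run a FastAPI app):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# The query parameter name matches the `input` argument of read_root:
#
#   curl "http://localhost:8000/Genera?input=Hello"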