from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient

app = Flask(__name__)
app.config["DEBUG"] = True  # Enable for debugging

# Client for the hosted Mixtral instruct model (Hugging Face Inference API)
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
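# Note (an assumption about your setup, not required by this code): gated or
# rate-limited models may need an access token, e.g.
#   client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=os.environ["HF_TOKEN"])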

# Generate a completion, formatting the conversation with Mixtral's instruction template
def generate(
    prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
):
    # Sampling requires a strictly positive temperature; clamp tiny or zero values.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed so identical requests produce reproducible samples
    )

    # Format the conversation with Mixtral's instruction template: each user
    # turn is wrapped in [INST] ... [/INST], followed by the model's reply.
    turns = "".join(f"[INST] {u} [/INST] {b}</s> " for u, b in history)
    formatted_prompt = f"<s>{turns}[INST] {system_prompt}\n{prompt} [/INST]"

    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    # Accumulate streamed tokens into the full completion text.
    for response in stream:
        output += response.token.text
    return output


@app.route("/generate", methods=["POST"])
def generate_text():
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    if not prompt:
        return jsonify({"error": "Missing required field: prompt"}), 400
    history = data.get("history", [])
    system_prompt = data.get("system_prompt", "")
    temperature = data.get("temperature", 0.9)
    max_new_tokens = data.get("max_new_tokens", 256)
    top_p = data.get("top_p", 0.95)
    repetition_penalty = data.get("repetition_penalty", 1.0)

    response = generate(
        prompt, history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty
    )

    return jsonify({"response": response})


if __name__ == "__main__":
    # Bind to all interfaces; 7860 is the default port for Hugging Face Spaces.
    app.run(host="0.0.0.0", port=7860)
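
# Example request (a sketch; assumes the server is reachable on localhost:7860):
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello!", "system_prompt": "You are a helpful assistant.", "history": []}'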