Commit 64b5a1f (parent: 05a057d): "fixes 2"
app.py CHANGED

```diff
@@ -1,56 +1,37 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
+from huggingface_hub import InferenceClient
 from typing import List, Tuple, Dict
-import torch
 
-
-model_name = "AuriLab/gpt-bi-instruct-cesar"
-tokenizer_name = "AuriLab/gpt-bi"
-tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")
 
```
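This commit swaps local weight loading via `transformers` (with the separate `AuriLab/gpt-bi` tokenizer) for `huggingface_hub`'s `InferenceClient`, so generation now happens on the hosted Inference API rather than inside the Space. A minimal sketch for smoke-testing the same endpoint outside Gradio; the test prompt is a made-up example, not from the commit:

```python
from huggingface_hub import InferenceClient

client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")

# Non-streaming call: returns the full completion in one object.
# The message below is a hypothetical test input.
out = client.chat_completion(
    [{"role": "user", "content": "Kaixo!"}],
    max_tokens=64,
)
print(out.choices[0].message.content)
```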
```diff
-def format_messages(history: List[Tuple[str, str]], system_message: str, user_message: str) -> str:
-    [old body lost in page extraction; only the fragments "if" and "return" survive]
 
+def format_messages(history: List[Tuple[str, str]], system_message: str, user_message: str) -> List[Dict[str, str]]:
+    messages = [{"role": "system", "content": system_message}]
+    messages.extend([
+        {"role": "user" if i % 2 == 0 else "assistant", "content": msg}
+        for turn in history
+        for i, msg in enumerate(turn)
+        if msg
+    ])
+    messages.append({"role": "user", "content": user_message})
+    return messages
 
```
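As a quick sanity check of the new helper, a standalone sketch (the conversation strings are invented for illustration) showing the message list it builds, with each history pair flattened into alternating user/assistant roles:

```python
from typing import Dict, List, Tuple

# The helper as added in this commit, reproduced so the check runs standalone.
def format_messages(history: List[Tuple[str, str]], system_message: str,
                    user_message: str) -> List[Dict[str, str]]:
    messages = [{"role": "system", "content": system_message}]
    messages.extend([
        {"role": "user" if i % 2 == 0 else "assistant", "content": msg}
        for turn in history
        for i, msg in enumerate(turn)
        if msg
    ])
    messages.append({"role": "user", "content": user_message})
    return messages

# Invented conversation: one earlier (user, assistant) exchange.
print(format_messages([("Kaixo!", "Kaixo! Zer moduz?")],
                      "You are a helpful assistant.", "Nor zara?"))
# [{'role': 'system', 'content': 'You are a helpful assistant.'},
#  {'role': 'user', 'content': 'Kaixo!'},
#  {'role': 'assistant', 'content': 'Kaixo! Zer moduz?'},
#  {'role': 'user', 'content': 'Nor zara?'}]
```

The `if msg` filter drops empty entries, so a trailing `(user, None)` pair in Gradio's history does not inject an empty assistant turn.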
```diff
-def respond(message: str, history: List[Tuple[str, str]]) -> str:
-    [old system-prompt string only partially recoverable:]
-    2. Avoid repeating the same words or phrases
-    3. Use synonyms and alternative expressions
-    4. Be concise and direct"""
-    [old generation call only partially recoverable:]
-        do_sample=True,
-        pad_token_id=tokenizer.pad_token_id,
-        eos_token_id=tokenizer.eos_token_id,
-    )
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    # Extract only the assistant's response
-    response = response.split("Assistant:")[-1].strip()
-    return response
+def respond(message: str, history: List[Tuple[str, str]], system_message: str, max_tokens: int, temperature: float, top_p: float) -> str:
+    messages = format_messages(history, system_message, message)
+    response = ""
+
+    for msg in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=0.7,  # increased for more variety
+        top_p=0.85,  # adjusted for better balance
+    ):
+        token = msg.choices[0].delta.content
+        response += token
+        yield response
```
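One caveat the commit does not address: depending on the serving backend, a stream chunk's `delta.content` can be `None` (for example a final chunk that only carries a finish reason), which would make `response += token` raise `TypeError`. A defensive variant of the loop, offered as an assumption rather than a confirmed requirement of this endpoint; `client`, `messages`, `max_tokens`, and `response` are the names from `respond` above:

```python
for msg in client.chat_completion(messages, max_tokens=max_tokens, stream=True):
    token = msg.choices[0].delta.content
    if token:  # skip chunks with no text payload (assumed possible, e.g. the final chunk)
        response += token
        yield response
```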
```diff
 
-# Create the Gradio interface with custom title
 demo = gr.ChatInterface(
-    [old argument line lost in page extraction]
+    respond,
     title="Demo GPT-BI instruct",
 )
```
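One thing the new wiring leaves open: `respond` now takes `system_message`, `max_tokens`, `temperature`, and `top_p`, but `gr.ChatInterface` is created without `additional_inputs`, so Gradio will call it with only `(message, history)` and the extra required parameters go unfilled; the `temperature`/`top_p` arguments are also shadowed by the hardcoded 0.7/0.85 inside `chat_completion`. A sketch of one way to expose them (widget choices and defaults are assumptions, not from the commit):

```python
import gradio as gr

# Assumed wiring, not part of this commit: expose respond's extra
# parameters as widgets so ChatInterface can supply them on each call.
demo = gr.ChatInterface(
    respond,
    title="Demo GPT-BI instruct",
    additional_inputs=[
        gr.Textbox("You are a helpful assistant.", label="System message"),
        gr.Slider(1, 1024, value=256, step=1, label="Max tokens"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.85, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()
```

For the sliders to take effect, the hardcoded `temperature=0.7` and `top_p=0.85` in `chat_completion` would also need to become `temperature=temperature` and `top_p=top_p`.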