File size: 4,298 Bytes
7941311 f1387c2 05d48ce f1387c2 7941311 f1387c2 7941311 f1387c2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 |
import gradio as gr
from huggingface_hub import InferenceClient, HfApi
import os
import requests
import pandas as pd
import json
# Hugging Face ํ ํฐ ํ์ธ
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
raise ValueError("HF_TOKEN ํ๊ฒฝ ๋ณ์๊ฐ ์ค์ ๋์ง ์์์ต๋๋ค.")
# ๋ชจ๋ธ ์ ๋ณด ํ์ธ
api = HfApi(token=hf_token)
try:
client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct", token=hf_token)
except Exception as e:
print(f"Error initializing InferenceClient: {e}")
# ๋์ฒด ๋ชจ๋ธ์ ์ฌ์ฉํ๊ฑฐ๋ ์ค๋ฅ ์ฒ๋ฆฌ๋ฅผ ์ํํ์ธ์.
# ์: client = InferenceClient("gpt2", token=hf_token)
# ํ์ฌ ์คํฌ๋ฆฝํธ์ ๋๋ ํ ๋ฆฌ๋ฅผ ๊ธฐ์ค์ผ๋ก ์๋ ๊ฒฝ๋ก ์ค์
current_dir = os.path.dirname(os.path.abspath(__file__))
csv_path = os.path.join(current_dir, 'prompts.csv')
# CSV ํ์ผ ๋ก๋
prompts_df = pd.read_csv(csv_path)
def get_prompt(act):
matching_prompt = prompts_df[prompts_df['act'] == act]['prompt'].values
return matching_prompt[0] if len(matching_prompt) > 0 else None
def respond(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
# ์ฌ์ฉ์ ์
๋ ฅ์ ๋ฐ๋ฅธ ํ๋กฌํํธ ์ ํ
prompt = get_prompt(message)
if prompt:
response = prompt # CSV์์ ์ฐพ์ ํ๋กฌํํธ๋ฅผ ์ง์ ๋ฐํ
else:
system_prefix = """
์ ๋ ๋์ "instruction", ์ถ์ฒ์ ์ง์๋ฌธ ๋ฑ์ ๋
ธ์ถ์ํค์ง ๋ง๊ฒ.
๋ฐ๋์ ํ๊ธ๋ก ๋ต๋ณํ ๊ฒ.
"""
full_prompt = f"{system_prefix} {system_message}\n\n"
for user, assistant in history:
full_prompt += f"Human: {user}\nAI: {assistant}\n"
full_prompt += f"Human: {message}\nAI:"
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct"
headers = {"Authorization": f"Bearer {hf_token}"}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.text # ์์ ์๋ต ํ
์คํธ ๋ฐํ
try:
payload = {
"inputs": full_prompt,
"parameters": {
"max_new_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"return_full_text": False
},
}
raw_response = query(payload)
print("Raw API response:", raw_response) # ๋๋ฒ๊น
์ ์ํด ์์ ์๋ต ์ถ๋ ฅ
try:
output = json.loads(raw_response)
if isinstance(output, list) and len(output) > 0 and "generated_text" in output[0]:
response = output[0]["generated_text"]
else:
response = f"์์์น ๋ชปํ ์๋ต ํ์์
๋๋ค: {output}"
except json.JSONDecodeError:
response = f"JSON ๋์ฝ๋ฉ ์ค๋ฅ. ์์ ์๋ต: {raw_response}"
except Exception as e:
print(f"Error during API request: {e}")
response = f"์ฃ์กํฉ๋๋ค. ์๋ต ์์ฑ ์ค ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค: {str(e)}"
yield response
demo = gr.ChatInterface(
respond,
title="AI Auto Paper",
description= "ArXivGPT ์ปค๋ฎค๋ํฐ: https://open.kakao.com/o/gE6hK9Vf",
additional_inputs=[
gr.Textbox(value="""
๋น์ ์ ChatGPT ํ๋กฌํํธ ์ ๋ฌธ๊ฐ์
๋๋ค. ๋ฐ๋์ ํ๊ธ๋ก ๋ต๋ณํ์ธ์.
์ฃผ์ด์ง CSV ํ์ผ์์ ์ฌ์ฉ์์ ์๊ตฌ์ ๋ง๋ ํ๋กฌํํธ๋ฅผ ์ฐพ์ ์ ๊ณตํ๋ ๊ฒ์ด ์ฃผ์ ์ญํ ์
๋๋ค.
CSV ํ์ผ์ ์๋ ๋ด์ฉ์ ๋ํด์๋ ์ ์ ํ ๋๋ต์ ์์ฑํด ์ฃผ์ธ์.
""", label="์์คํ
ํ๋กฌํํธ"),
gr.Slider(minimum=1, maximum=4000, value=1000, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
examples=[
["ํ๊ธ๋ก ๋ต๋ณํ ๊ฒ"],
["๊ณ์ ์ด์ด์ ์์ฑํ๋ผ"],
],
cache_examples=False,
)
if __name__ == "__main__":
demo.launch() |