Spaces:
Sleeping
seawolf2357
committed on
Commit • 4509126
1 Parent(s): 0926d14
Update app.py
app.py CHANGED

@@ -14,6 +14,9 @@ intents.messages = True
 # Inference API client setup
 hf_client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct", token=os.getenv("HF_TOKEN"))
 
+# Variable that stores the conversation history
+conversation_history = []
+
 class MyClient(discord.Client):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -26,7 +29,7 @@ class MyClient(discord.Client):
         logging.info('Ignoring my own message.')
         return
 
-        logging.debug(f'Receiving message: {message.content}')
+        logging.debug(f'Receiving message: {message.content}')
         response = await generate_response(message.content)
         await message.channel.send(response)
 
@@ -40,22 +43,25 @@ async def generate_response(user_input):
     You must respond in Korean.
     """
 
-    #
-
-
+    # Manage the conversation history
+    global conversation_history
+    conversation_history.append({"role": "user", "content": user_input})
+
+    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}] + conversation_history
 
     # Wrapper to run the synchronous function asynchronously; changed to stream=True
     loop = asyncio.get_event_loop()
     response = await loop.run_in_executor(None, lambda: hf_client.chat_completion(
-        messages, max_tokens=200, stream=True, temperature=0.9, top_p=0.9))
+        messages, max_tokens=200, stream=True, temperature=0.9, top_p=0.9))
 
     # Added logic to handle the streaming response
     full_response = ""
     for part in response:
         full_response += part.choices[0].delta.content.strip()
 
-
+    conversation_history.append({"role": "assistant", "content": full_response})
 
+    logging.debug(f'Model response: {full_response}')
     return full_response
 
 # Create and run the Discord bot instance
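
For reference, a minimal standalone sketch of the pattern this commit relies on: the blocking hf_client.chat_completion(...) call is pushed onto a worker thread via run_in_executor and the streamed deltas are concatenated. Two details here go beyond the committed code and are assumptions: the entire iteration is moved into the executor (in the commit only the call itself is wrapped, so consuming the stream still runs on the event loop), and each delta is guarded against None and joined without strip(), since stripping every chunk can drop the whitespace between tokens.

import asyncio
from huggingface_hub import InferenceClient

hf_client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")  # token omitted in this sketch

async def stream_chat(messages):
    loop = asyncio.get_event_loop()

    def consume():
        # chat_completion(stream=True) returns an iterator of chunks; both
        # creating and draining it block, so both happen on the worker thread.
        chunks = hf_client.chat_completion(
            messages, max_tokens=200, stream=True, temperature=0.9, top_p=0.9)
        parts = []
        for part in chunks:
            delta = part.choices[0].delta.content
            if delta:  # guard: delta can be None or empty on some chunks
                parts.append(delta)
        return "".join(parts)

    return await loop.run_in_executor(None, consume)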
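One caveat on the new history handling: conversation_history is a single module-level list shared by every channel and user, and nothing ever trims it, so the prompt grows with each exchange until the model's context window is exhausted. A hypothetical cap (not part of the commit; MAX_TURNS is an assumed tuning knob):

MAX_TURNS = 10  # assumed: keep only the last 10 user/assistant exchanges

def remember(role, content):
    conversation_history.append({"role": role, "content": content})
    # each exchange is two entries (user + assistant); drop anything older
    del conversation_history[:-2 * MAX_TURNS]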