tkdehf2 committed
Commit e70eab6 · verified · 1 Parent(s): 0810923

Update app.py

Files changed (1)
  1. app.py +55 -35
app.py CHANGED
@@ -1,35 +1,55 @@
- from transformers import pipeline
-
- # Create a sentiment-classification pipeline
- classifier = pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")
-
- # Classify the emotion of the input text
- def classify_emotion(text):
-     result = classifier(text)[0]
-     label = result['label']
-     score = result['score']
-     return label, score
-
- # Generate a diary entry from the detected emotion
- def generate_diary(emotion):
-     prompts = {
-         "positive": "Today was a really good day. ",
-         "negative": "Today was a hard day. ",
-         "neutral": "Today was just an ordinary day. "
-     }
-     prompt = prompts.get(emotion, "Today was a day of mixed feelings. ")
-     diary = prompt + "That is all for today's diary."
-     return diary
-
- # Read the user's input
- user_input = input("Describe how you feel today in one sentence: ")
-
- # Classify the emotion
- emotion_label, _ = classify_emotion(user_input)
-
- # Generate a diary entry based on the emotion
- diary = generate_diary(emotion_label)
-
- # Print the generated diary
- print("=== Generated diary ===")
- print(diary)
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+ def generate_diary(emotion, num_samples=1, max_length=100, temperature=0.7):
+     # Load the tokenizer and model used to generate the diary from the emotion
+     tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+     model = GPT2LMHeadModel.from_pretrained("gpt2")
+
+     # Build a prefix sentence that matches the emotion
+     if emotion == "happy":
+         prefix = "I feel good today. "
+     elif emotion == "sad":
+         prefix = "I am feeling sad. "
+     elif emotion == "angry":
+         prefix = "I feel anger welling up. "
+     else:
+         prefix = "I feel strange today. "
+
+     # Tokenize the prefix to build the input sequence
+     input_sequence = tokenizer.encode(prefix, return_tensors="pt")
+
+     # Generate text with the model
+     output = model.generate(
+         input_sequence,
+         max_length=max_length,
+         num_return_sequences=num_samples,
+         temperature=temperature,
+         pad_token_id=tokenizer.eos_token_id
+     )
+
+     # Return the generated diary entries
+     return [tokenizer.decode(output_sequence, skip_special_tokens=True) for output_sequence in output]
+
+ def main():
+     while True:
+         # Ask the user for today's emotion
+         try:
+             emotion = input("Enter today's emotion (happy, sad, angry, etc.): ")
+             if emotion.lower() in ['happy', 'sad', 'angry']:
+                 break
+             else:
+                 print("That emotion is not recognized. Please try again.")
+         except KeyboardInterrupt:
+             print("\nInput cancelled by the user. Exiting the program.")
+             return
+
+     # Generate the diary entries
+     diary_entries = generate_diary(emotion)
+     # Print the generated diary
+     print("Today's diary:")
+     for i, entry in enumerate(diary_entries, start=1):
+         print(f"{i}. {entry}")
+
+ if __name__ == "__main__":
+     main()
+
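A minimal usage sketch of the new generate_diary, assuming the committed file is importable as a module named app (the if __name__ == "__main__" guard keeps main() from running on import). Note that in the Transformers generate API, temperature only takes effect when sampling is enabled with do_sample=True, and num_return_sequences greater than 1 likewise requires sampling or beam search, so with the committed greedy call num_samples should stay at 1.

    # Hypothetical usage sketch; assumes the updated script is saved as app.py.
    from app import generate_diary

    # With the committed generate() call (no do_sample=True), decoding is greedy,
    # so temperature is effectively ignored and only one sequence can be returned.
    entries = generate_diary("happy", num_samples=1, max_length=80, temperature=0.7)
    for i, entry in enumerate(entries, start=1):
        print(f"{i}. {entry}")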