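"""Streamlit app for generating student characteristics/behavior evaluations.

A curriculum achievement standard is selected (or typed in), sent to the OpenAI
ChatCompletion API, and the returned annotated_text(...) call is rendered.
A chosen sentence can then be expanded into similar evaluation phrases.
Requires the OPENAI_API_KEY environment variable and the local
achivenment_standards module.
"""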
import streamlit as st
import openai
import json
from annotated_text import annotated_text
import os
import achivenment_standards as data
# OpenAI API setup (the key is read from an environment variable)
openai.api_key = os.getenv("OPENAI_API_KEY")
# Inference helper that calls GPT to generate the annotated evaluation
def generate_annotated_text(text):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=[
            {
                "role": "system",
                "content": (
                    "Generate a student characteristics and behavior evaluation based on an achievement standard.\n"
                    "Given an achievement standard as input, return an evaluation of the student's characteristics and behavior in annotated_text format. "
                    "Looking at the achievement standard, produce a systematically organized output that covers the student's activities, achievement level, teacher feedback, and the student's attitude. "
                    "Always omit the subject of each sentence.\n\n"
                    "Example:\n"
                    "Input: ```Achievement standards: [6국01-07] Listens with an attitude of understanding and empathizing with the situation the other person faces. [6국01-02] Presents opinions and discusses them, adjusting them together.```\n"
                    "Output: ```annotated_text(\n"
                    "    (\"Habitually organizes their own thoughts in a clear, orderly way.\", \"attitude\", \"rgba(255, 0, 0, 0.3)\"),\n"
                    "    (\"Writes well-argued pieces that take a position on social issues.\", \"achievement level\", \"rgba(0, 0, 255, 0.3)\"),\n"
                    "    (\"In a role-play about resolving a friend's worries, proposed workable solutions while being considerate of the other person.\", \"activity\", \"rgba(0, 128, 0, 0.3)\"),\n"
                    "    (\"Understands and empathizes with the situations others face, builds good relationships with classmates, and mediates conflicts.\", \"teacher feedback\", \"rgba(128, 128, 128, 0.3)\"),\n"
                    "    (\"In a discussion on how to use the playground during recess, presented their own opinion convincingly based on sound reasons and supporting materials while actively accepting others' opinions and effectively reconciling views.\", \"activity\", \"rgba(0, 128, 0, 0.3)\"),\n"
                    "    (\"Shows an attitude of respecting and cooperating with others' opinions.\", \"attitude\", \"rgba(255, 0, 0, 0.3)\")\n"
                    ")\n```"
                )
            },
            {
                "role": "user",
                "content": text
            }
        ],
        temperature=1,
        max_tokens=10000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    return response['choices'][0]['message']['content']
# Function that generates similar evaluation sentences
def generate_similar_sentences(base_sentence):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=[
            {
                "role": "system",
                "content": f"Create 10 example evaluation sentences about student characteristics and behavior that are similar to '{base_sentence}'. Make each sentence end in the concise record style ('~음', '~함', '~임')."
            },
            {
                "role": "user",
                "content": base_sentence
            }
        ],
        temperature=0.7,
        max_tokens=10000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    generated_sentences = response.choices[0].message['content'].split('\n')
    return [sentence.strip() for sentence in generated_sentences if sentence.strip()]
# Streamlit app title and description
st.title("Achievement-Standard-Based Student Characteristics and Behavior Evaluation Generator")
st.write("Enter an achievement standard and the app returns an evaluation of the student's characteristics and behavior, \n\n combining four elements: [student activities, achievement level, teacher feedback, student attitude].")
# Load the achievement-standard data
achievement_standards = data.achievement_standards
# Grade-group dropdown
grade_group = st.selectbox("Select a grade group:", list(achievement_standards.keys()))
# Subject list for the selected grade group
subject_list = list(achievement_standards[grade_group].keys())
subject = st.selectbox("Select a subject:", subject_list)
# Achievement standards for the selected subject
selected_standards = achievement_standards[grade_group][subject]
selected_standard = st.selectbox("Select an achievement standard:", selected_standards)
# Use the selected achievement standard as the default value of the text input
achievement_standard = st.text_input("Achievement standard:", value=selected_standard)
# Initialize session state
if 'selected_sentence' not in st.session_state:
    st.session_state.selected_sentence = None
if 'similar_sentences' not in st.session_state:
    st.session_state.similar_sentences = []
if 'selected_index' not in st.session_state:
    st.session_state.selected_index = 0
# When the "Generate evaluation" button is clicked
if st.button("Generate evaluation"):
    with st.spinner('Generating response...'):
        result = generate_annotated_text(achievement_standard)
    # Render the result (the model is expected to return an annotated_text(...) call)
    exec(result.replace('```', ''))
    # Extract only the sentences from the annotated_text output
    result_lines = result.split('\n')
    sentences = []
    for line in result_lines:
        # Locate '("' and keep only the text that follows it
        start_idx = line.find('("')
        if start_idx != -1:
            end_idx = line.find('",', start_idx)
            sentence = line[start_idx + 2:end_idx].strip()  # text between '("' and '",'
            sentences.append(sentence)
    # Show the sentences as radio buttons and remember the selected one
    selected_index = st.radio("Select a sentence:", range(len(sentences)), format_func=lambda x: sentences[x])
    st.session_state.selected_sentence = sentences[selected_index] if sentences else None
# Button to generate similar sentences
if st.button("Generate similar sentences") and st.session_state.get('selected_sentence'):
    with st.spinner('Generating sentences...'):
        st.session_state.similar_sentences = generate_similar_sentences(st.session_state.selected_sentence)
    for sentence in st.session_state.similar_sentences:
        st.write(sentence)