Update index.html

index.html (+11 -10)
```diff
@@ -24,22 +24,22 @@ import gradio as gr
 from transformers_js_py import pipeline
 from filters import convert
 
-#
-
+# Model and tokenizer file paths
+model_name = 'monologg/koelectra-base-v3-discriminator'
+tokenizer_path = 'tokenizer.json'
 
-
-
-    return result
+# Load model and tokenizer
+pipe = await pipeline('sentiment-analysis', model_name, tokenizer=tokenizer_path)
 
 async def predict(text):
     result = await pipe(text)
-return result
+    return result
 
 demo = gr.Interface(
     fn=predict,
-    inputs=
-    outputs="
-    title=
+    inputs="text",
+    outputs="text",
+    title="KoELECTRA Sentiment Analysis"
 )
 
 demo.launch()
@@ -74,8 +74,9 @@ def convert(input_data):
 
 <gradio-requirements>
 # Same syntax as requirements.txt
-
+transformers-js-py
 </gradio-requirements>
 </gradio-lite>
 </body>
 </html>
+
```
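The diff only touches two slices of index.html (lines 24-45 and 74-82). Below is a minimal sketch of how the full Gradio-Lite page could fit together after this commit; the CDN `<script>`/`<link>` tags, the `<gradio-file>` layout, the file name `filters.py`, and the body of `convert()` are assumptions not shown in the diff, while the app code and the requirements entry come from the commit itself.

```html
<!-- Sketch only: everything outside the app code and the requirements
     entry is assumed, not taken from this commit. -->
<!DOCTYPE html>
<html>
  <head>
    <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
  </head>
  <body>
    <gradio-lite>
      <gradio-file name="app.py" entrypoint>
import gradio as gr
from transformers_js_py import pipeline
from filters import convert

# Model and tokenizer file paths
model_name = 'monologg/koelectra-base-v3-discriminator'
tokenizer_path = 'tokenizer.json'

# Load model and tokenizer (top-level await is available because Gradio-Lite runs in Pyodide)
pipe = await pipeline('sentiment-analysis', model_name, tokenizer=tokenizer_path)

async def predict(text):
    result = await pipe(text)
    return result

demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="KoELECTRA Sentiment Analysis"
)

demo.launch()
      </gradio-file>
      <gradio-file name="filters.py">
def convert(input_data):
    # Hypothetical placeholder; the real implementation is outside this diff
    return input_data
      </gradio-file>
      <gradio-requirements>
# Same syntax as requirements.txt
transformers-js-py
      </gradio-requirements>
    </gradio-lite>
  </body>
</html>
```

With `transformers-js-py` listed in `<gradio-requirements>`, the `pipeline` call runs Transformers.js entirely in the browser, so no Python backend is needed; `inputs="text"` and `outputs="text"` are Gradio shorthands for `gr.Textbox` components.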