yukiapple323 committed
Commit 429fe63 · verified · 1 parent: b52a27f

Update index.html

Files changed (1): index.html (+15 -19)
index.html CHANGED
@@ -24,28 +24,24 @@ import gradio as gr
 from transformers_js_py import pipeline
 from filters import convert
 
-pipe = await pipeline('object-detection', 'Xenova/detr-resnet-50')
+# The existing pipeline used object-detection, so create a new pipeline that uses the koelectra-base-v3-discriminator model instead.
+pipe = await pipeline('sentiment-analysis', 'monologg/koelectra-base-v3-discriminator')
 
-async def fn(image):
-    result = await pipe(image)
-    return result
-
-#demo = gr.Interface.from_pipeline(pipe)
+async def fn(text):
+    result = await pipe(text)
+    return result
 
-async def predict(image):
-    result = await pipe(image)
-    print(result)
-    result = convert(result)
-    print(result)
-    return image, result
+async def predict(text):
+    result = await pipe(text)
+    return result['label'], result['score']
 
 demo = gr.Interface(
-    fn=predict,
-    inputs=gr.Image(type='pil'),
-    outputs=gr.AnnotatedImage(),
-    title='On-Device Object-Detection with Gradio-Lite & Transformers.js'
+    fn=predict,
+    inputs=gr.Textbox(lines=7, label="Input Text"),
+    outputs="label",
+    title='Sentiment Analysis with KoELECTRA Model'
 )
-
+
 demo.launch()
 </gradio-file>
 
@@ -78,8 +74,8 @@ def convert(input_data):
 
 <gradio-requirements>
 # Same syntax as requirements.txt
-transformers-js-py
+transformers_js_py
 </gradio-requirements>
 </gradio-lite>
 </body>
-</html>
+</html>
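
For reference, a minimal sketch of how the updated <gradio-file> code could be written so it runs cleanly. This is illustrative and not part of the commit: it assumes the transformers_js_py sentiment-analysis pipeline returns a list of {'label', 'score'} dicts (as the underlying Transformers.js pipeline does), so predict indexes the first element, and it uses a gr.Label output instead of returning a (label, score) tuple to a single "label" output.

import gradio as gr
from transformers_js_py import pipeline

# Load the KoELECTRA checkpoint with the sentiment-analysis task, as in the commit.
pipe = await pipeline('sentiment-analysis', 'monologg/koelectra-base-v3-discriminator')

async def predict(text):
    # Assumed result shape: [{'label': 'LABEL_0', 'score': 0.98}]
    result = await pipe(text)
    top = result[0]
    # gr.Label accepts a {label: confidence} mapping
    return {top['label']: top['score']}

demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=7, label="Input Text"),
    outputs=gr.Label(label="Sentiment"),
    title='Sentiment Analysis with KoELECTRA Model',
)

demo.launch()

Top-level await is valid here because Gradio-Lite runs the code in Pyodide, which the original object-detection version already relied on.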