Spaces:
Runtime error
Runtime error
Commit
·
53dc870
1
Parent(s):
68e5d2d
Adding files
Browse files- .gitattributes +0 -35
- __pycache__/main.cpython-310.pyc +0 -0
- main.py +18 -3
- model.py +44 -0
- static/script.js +22 -0
- static/static.html +35 -0
- static/style.css +17 -0
.gitattributes
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
__pycache__/main.cpython-310.pyc
ADDED
Binary file (920 Bytes). View file
|
|
main.py
CHANGED
@@ -1,7 +1,22 @@
|
|
1 |
from fastapi import FastAPI
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
app = FastAPI()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
5 |
-
@app.get('/
|
6 |
-
def
|
7 |
-
return
|
|
|
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse

from transformers import pipeline

from pydantic import BaseModel

app = FastAPI()

# Load the summarization model once at startup rather than per-request.
summarizer = pipeline("summarization", model="Falconsai/text_summarization")


@app.get("/prediction")
def pre(input: str):
    """Summarize *input* and return ``{"output": <first pipeline result>}``.

    NOTE(review): the parameter must stay named ``input`` — FastAPI derives
    the query-string key (``/prediction?input=...``) from it, and the
    frontend in static/script.js depends on that name, even though it
    shadows the builtin.
    """
    output = summarizer(input)
    return {"output": output[0]}


# Serve the frontend assets under /static.
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get('/')
def index() -> FileResponse:
    """Serve the single-page frontend."""
    return FileResponse(path='./static/static.html', media_type="text/html")
model.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import pipeline
from transformers import (TokenClassificationPipeline, AutoModelForTokenClassification, AutoTokenizer)
from transformers.pipelines import AggregationStrategy
import numpy as np


# ================================= summarize code =================================
def summerize(text):
    """Summarize *text* to roughly 30-80% of its original word count.

    Returns ``{"output": <pipeline result list>}``.

    NOTE(review): the misspelled public name is kept for backward
    compatibility with existing callers.
    """
    summarizer = pipeline("summarization", model="Falconsai/text_summarization")

    # Word count is only a rough proxy here: the pipeline's max_length /
    # min_length are measured in model tokens, not words — TODO confirm
    # the ratio is acceptable for the target model.
    text_len = len(text.split(' '))
    max_length = text_len * 80 // 100
    min_length = text_len * 30 // 100

    return {"output": summarizer(text, max_length=max_length, min_length=min_length, do_sample=True)}
16 |
+
|
17 |
+
|
18 |
+
# =================================s keywords code =================================
|
19 |
+
|
20 |
+
|
21 |
+
# Define keyphrase extraction pipeline
|
22 |
+
# class KeyphraseExtractionPipeline(TokenClassificationPipeline):
|
23 |
+
# def __init__(self, model, *args, **kwargs):
|
24 |
+
# super().__init__(
|
25 |
+
# model=AutoModelForTokenClassification.from_pretrained(model),
|
26 |
+
# tokenizer=AutoTokenizer.from_pretrained(model),
|
27 |
+
# *args,
|
28 |
+
# **kwargs
|
29 |
+
# )
|
30 |
+
|
31 |
+
# def postprocess(self, all_outputs):
|
32 |
+
# results = super().postprocess(
|
33 |
+
# all_outputs=all_outputs,
|
34 |
+
# aggregatsion_strategy=AggregationStrategy.FIRST,
|
35 |
+
# )
|
36 |
+
# return np.unique([result.get("word").strip() for result in results])
|
37 |
+
|
38 |
+
# # Load pipeline
|
39 |
+
# model_name = "ml6team/keyphrase-extraction-distilbert-inspec"
|
40 |
+
# extractor = KeyphraseExtractionPipeline(model=model_name)
|
41 |
+
|
42 |
+
# keyphrases = extractor(text)
|
43 |
+
|
44 |
+
# print(keyphrases)
|
static/script.js
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Takes the text from the form, hits the /prediction endpoint with it,
// and renders the returned output.
const textGenForm = document.querySelector('form');

const translateText = async (text) => {
  // encodeURIComponent: raw user text may contain '&', '#', '+', '?', etc.,
  // which would otherwise corrupt the query string.
  const inferResponse = await fetch(`prediction?input=${encodeURIComponent(text)}`);
  const inferJson = await inferResponse.json();

  // NOTE(review): the server returns {"output": output[0]} where output[0]
  // is a pipeline result object; if this renders as "[object Object]",
  // use inferJson.output.summary_text — confirm against main.py.
  return inferJson.output;
};


textGenForm.addEventListener('submit', async (event) => {
  event.preventDefault();

  const textGenInput = document.getElementById('text-gen-input');
  const textGenParagraph = document.querySelector('.text-gen-output');

  try {
    textGenParagraph.textContent = await translateText(textGenInput.value);
  } catch (err) {
    console.error(err);
  }
});
static/static.html
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Buddy</title>
  <link rel="stylesheet" href="../static/style.css" />
  <script type="module" src="../static/script.js"></script>
</head>

<body>
  <main>
    <section id="text-gen">
      <h1>Learning becomes easy</h1>
      <form>
        <p>Input your paragraph here</p>
        <label for="text-gen-input"></label>
        <!-- fixed typo: "parargraph" -> "paragraph" -->
        <input id="text-gen-input" type="text" placeholder="Enter your paragraph here" />
        <button id="text-gen-submit">Submit</button>
      </form>
    </section>
    <section>
      <p class="text-gen-output"></p>
      <ul>
        <li><span>Foo</span></li>
        <li><span>Bar</span></li>
        <li><span>Bat</span></li>
      </ul>
    </section>
  </main>

</body>

</html>
static/style.css
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Page heading */
h1 {
  text-align: center;
}

/* Input form spacing */
form {
  margin: 2rem 5rem;
}

/* Full-width text input */
input {
  margin: 0 auto;
  width: 100%;
}

/* Output keyword list */
ul li {
  color: red;
}

ul li span {
  color: blue;
}