Commit fdcf510
Parent(s): cf73210

configuring gunicorn timeout length

Files changed:
- Dockerfile +1 -1
- __pycache__/app.cpython-312.pyc +0 -0
- __pycache__/model.cpython-312.pyc +0 -0
- app.py +4 -2
- model.py +10 -5
- requirements.txt +0 -2
Dockerfile
CHANGED
@@ -18,4 +18,4 @@ RUN mkdir -p /code/huggingface_cache && chmod -R 777 /code/huggingface_cache
 ENV HF_HOME=/code/huggingface_cache
 
 # Run the application
-CMD ["sh", "-c", "gunicorn -b 0.0.0.0:7860 app:app"]
+CMD ["sh", "-c", "gunicorn -b 0.0.0.0:7860 --timeout 300 app:app"]
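The only functional change here is gunicorn's --timeout flag: a worker that stays silent for more than 30 seconds (the default) is killed and restarted, and app.py now trains the scikit-learn pipeline at import time, which can easily exceed that during worker boot. The same setting can also live in a config file instead of the CMD line; a minimal sketch, noting that gunicorn.conf.py is not part of this repo and is shown only as an equivalent:

# gunicorn.conf.py -- hypothetical equivalent of the CLI flags above
bind = "0.0.0.0:7860"   # same interface/port as the -b flag
timeout = 300           # seconds before an unresponsive worker is killed (default is 30)

With that file in the working directory, the CMD could shrink to "gunicorn app:app", since gunicorn picks up gunicorn.conf.py automatically.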
__pycache__/app.cpython-312.pyc
ADDED
Binary file (1.29 kB)
__pycache__/model.cpython-312.pyc
CHANGED
Binary files a/__pycache__/model.cpython-312.pyc and b/__pycache__/model.cpython-312.pyc differ
app.py
CHANGED
@@ -5,8 +5,9 @@ app = Flask(__name__)
 
 # Load data and train the model globally
 df = model.load_data('AI_Human.csv') # Make sure this path is correct
-
-pipeline = model.create_pipeline(
+x_train, x_test, y_train, y_test = model.split_data(df)
+pipeline = model.create_pipeline(x_train, y_train)
+
 
 @app.route('/', methods=['GET', 'POST'])
 def home():
@@ -17,5 +18,6 @@ def home():
         return jsonify({'classification': prediction})
     return render_template('home.html')
 
+
 if __name__ == '__main__':
     app.run(debug=True)
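These hunks only change module-level setup (the pipeline is now built from an explicit train/test split) plus blank lines; the home() route itself is untouched. A quick smoke test against the running container might look like the sketch below; the form field name "text" and the local URL are assumptions, since neither home.html nor the POST-handling lines are visible in this diff:

# hypothetical smoke test -- adjust the field name to whatever home.html actually posts
import requests

resp = requests.post("http://localhost:7860/", data={"text": "Sample passage to classify."})
print(resp.json())  # the route returns jsonify({'classification': prediction})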
model.py
CHANGED
@@ -5,32 +5,37 @@ from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
 from sklearn.naive_bayes import MultinomialNB
 from sklearn.metrics import accuracy_score, classification_report
 
+
 def remove_tags(text):
     tags = ['\n', '\'']
     for tag in tags:
         text = text.replace(tag, '')
     return text
 
+
 def load_data(filepath):
     df = pd.read_csv(filepath)
     df['text'] = df['text'].apply(remove_tags)
     return df
 
+
 def split_data(df):
     y = df['generated']
-
-
-    return
+    x = df['text']
+    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
+    return x_train, x_test, y_train, y_test
 
-
+
+def create_pipeline(x_train, y_train):
     pipeline = Pipeline([
         ('count_vectorizer', CountVectorizer()),
         ('tfidf_transformer', TfidfTransformer()),
         ('classifier', MultinomialNB())
     ])
-    pipeline.fit(
+    pipeline.fit(x_train, y_train)
     return pipeline
 
+
 def predict_text(text, pipeline):
     processed_text = remove_tags(text)
     prediction = pipeline.predict([processed_text])[0]
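Taken together, the refactored helpers are meant to be chained the way app.py now does at import time. A sketch based only on the lines visible above; it assumes train_test_split is imported from sklearn.model_selection near the top of model.py, outside this hunk:

import model

df = model.load_data('AI_Human.csv')                      # read the CSV and strip newline/quote characters
x_train, x_test, y_train, y_test = model.split_data(df)   # 80/20 split of df['text'] vs df['generated'], random_state=42
pipeline = model.create_pipeline(x_train, y_train)        # CountVectorizer -> TfidfTransformer -> MultinomialNB, fitted
print(model.predict_text("Some passage to classify.", pipeline))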
requirements.txt
CHANGED
@@ -1,7 +1,5 @@
 flask
 gunicorn
 pandas
-torch==1.10.0
-tensorflow==2.8.0
 scikit-learn
 transformers