import os
import time
import json
import uuid
import torch
import requests
import whisper
import cv2
import pytesseract
import re
import boto3
from moviepy.editor import VideoFileClip
from flask import Flask, request, jsonify
from flask_apscheduler import APScheduler
from flask_cors import CORS
from werkzeug.utils import secure_filename
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments
from sentence_transformers import SentenceTransformer
from chromadb import Client as ChromaClient
from chromadb.config import Settings
from chromadb.utils import embedding_functions

app = Flask(__name__)
CORS(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()

# Load the Whisper model once at startup so it is not reloaded on every request
MODEL = whisper.load_model("base")
app.config['UPLOAD_FOLDER'] = "/home/ubuntu/classcut/data"
OCR_TEXT_SUFFIX = "_ocrtext.txt"
TRANSCRIPT_SUFFIX = "_transcript.txt"
DETAILS_SUFFIX = "_details.json"

ALLOWED_VIDEO_EXTENSIONS = {'mp4', 'avi', 'mov', 'mkv'}
ALLOWED_AUDIO_EXTENSIONS = {'wav', 'mp3', 'm4a', 'flac'}

# Initialize Mistral 7B model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    torch_dtype="auto",
    device_map="auto",
    trust_remote_code=True
)

# Initialize SentenceTransformer for embeddings
embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Set the Chroma DB path
CHROMA_PATH = "chroma"

# Initialize Chroma vector store
chroma_client = ChromaClient(Settings(persist_directory=CHROMA_PATH))
collection = chroma_client.get_or_create_collection(name="video_transcripts")

# AWS S3 Configuration
S3_BUCKET = 'classcut-videos'
S3_REGION = 'ap-south-1'
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')

s3 = boto3.client('s3', region_name=S3_REGION,
                  aws_access_key_id=AWS_ACCESS_KEY_ID,
                  aws_secret_access_key=AWS_SECRET_ACCESS_KEY)


def upload_to_s3(file_path):
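    """
    Uploads a local file to the configured S3 bucket.

    :param file_path: Path to the local file.
    :return: Public S3 URL of the uploaded object, or None on failure.
    """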
    file_name = os.path.basename(file_path)
    # Upload the file to S3
    try:
        s3.upload_file(file_path, S3_BUCKET, file_name, ExtraArgs={
            'ContentType': 'binary/octet-stream',
            'ContentDisposition': 'inline'
        })
        # Construct the S3 URL
        s3_url = f"https://{S3_BUCKET}.s3.{S3_REGION}.amazonaws.com/{file_name}"
        print(f"Uploaded {file_name} to S3 bucket: {S3_BUCKET}")
        return s3_url
    except Exception as e:
        print(f"Error uploading {file_name} to S3: {e}")
        return None


def extract_audio(video_path):
    """
    Extracts audio from a given video file and saves it as an mp3 file.

    :param video_path: Path to the video file.
    :return: Path to the extracted audio file.
    """
    with VideoFileClip(video_path) as video:
        audio_path = f"{video_path}.mp3"
        video.audio.write_audiofile(audio_path)
    return audio_path


def transcribe_with_timestamps(audio_path):
    """
    Transcribes the given audio file using the Whisper model, including timestamps.

    :param audio_path: Path to the audio file.
    :return: A list of transcribed segments with timestamps.
    """
    result = MODEL.transcribe(audio_path, verbose=True, language='hi')
    return [f"{seg['start']} - {seg['end']}: {seg['text']}" for seg in result["segments"]]


def format_transcript(transcript_segments):
    """
    Formats transcript segments into a single string.

    :param transcript_segments: List of transcript segments.
    :return: Formatted transcript.
    """
    return "\n".join(transcript_segments).replace('\\n', ' ').strip()


def extract_text_from_video(video_path, frame_interval=30):
    """
    Extracts text from video frames using Tesseract OCR and saves unique text.

    :param video_path: Path to the video file.
    :param frame_interval: Interval to capture frames for OCR (in seconds).
    :return: List of unique text found in the video.
    """
    print(f"Attempting to extract text from {video_path}")

    unique_texts = set()
    with VideoFileClip(video_path) as video:
        duration = int(video.duration)

        print(f"Duration of video: {duration} seconds.")
        print(f"Frame interval: {frame_interval} seconds.")

        for time_sec in range(0, duration, frame_interval):
            frame = video.get_frame(time_sec)
            # MoviePy returns frames in RGB order, so convert RGB -> grayscale for OCR
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            text = pytesseract.image_to_string(gray).strip()
            if text:
                unique_texts.add(text)

    with open(f"{video_path}{OCR_TEXT_SUFFIX}", 'w') as file:
        file.write("\n".join(unique_texts))

    return list(unique_texts)


def process_video(video_path):
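    """
    Runs the full processing pipeline for a video: audio extraction, Whisper
    transcription, frame OCR, model fine-tuning and indexing in ChromaDB.

    :param video_path: Path to the video file.
    """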
    # Extract audio and transcribe
    audio_path = extract_audio(video_path)
    transcript_segments = transcribe_with_timestamps(audio_path)
    with open(f"{video_path}{TRANSCRIPT_SUFFIX}", 'w') as file:
        file.write("\n".join(transcript_segments))

    # Extract text from video frames
    extract_text_from_video(video_path)

    # Fine-tune the Mistral model on the new transcript
    fine_tune_model(transcript_segments)

    # Add the transcript to ChromaDB
    add_to_chromadb(' '.join(transcript_segments))

    # You can add additional processing if needed
    print(f"Processing of {video_path} completed.")


def allowed_video_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_VIDEO_EXTENSIONS


def add_to_chromadb(text):
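    """
    Embeds the given text with the SentenceTransformer model and stores it in
    the ChromaDB collection for later retrieval.

    :param text: Transcript text to index.
    """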
    # Generate embeddings
    embeddings = embedding_model.encode([text])

    # Add to ChromaDB (Chroma requires an explicit id for every document)
    collection.add(
        ids=[str(uuid.uuid4())],
        documents=[text],
        embeddings=embeddings.tolist(),
        metadatas=[{'source': 'video_transcript'}]
    )

    print("Text appended to ChromaDB.")


def fine_tune_model(transcript_segments):
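    """
    Fine-tunes the Mistral model on the transcript segments with a minimal
    causal language modelling setup and saves the result to ./fine_tuned_model.

    :param transcript_segments: List of transcript strings to train on.
    """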
    # Prepare data for fine-tuning
    print("Preparing data for fine-tuning...")
    dataset = [{'input_ids': tokenizer.encode(text, return_tensors='pt')[0]} for text in transcript_segments]

    # Define training arguments
    training_args = TrainingArguments(
        output_dir='./fine_tuned_model',
        num_train_epochs=1,            # Adjust as needed
        per_device_train_batch_size=1, # Adjust based on your hardware
        save_steps=10,
        save_total_limit=2,
        logging_steps=10,
        learning_rate=5e-5,           # Hyperparameter tuning can be done here
        fp16=True,                    # Enable if using compatible GPU
    )

    # Define a data collator: stack the batch into tensors and mirror input_ids
    # as labels for causal LM training (no padding needed with batch size 1)
    def data_collator(features):
        input_ids = torch.stack([f['input_ids'] for f in features])
        return {'input_ids': input_ids, 'labels': input_ids.clone()}

    # Initialize Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset,
        data_collator=data_collator,
    )

    # Fine-tune the model
    print("Starting fine-tuning...")
    trainer.train()
    print("Fine-tuning completed.")

    # Save the fine-tuned model
    model.save_pretrained('./fine_tuned_model')
    tokenizer.save_pretrained('./fine_tuned_model')
    print("Fine-tuned model saved.")


def query_chatbot(query_text):
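    """
    Answers a query with retrieval-augmented generation: fetches the most
    similar transcripts from ChromaDB and prompts the Mistral model with them.

    :param query_text: The user's question.
    :return: The generated answer string.
    """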
    # Retrieve relevant documents from ChromaDB
    query_embedding = embedding_model.encode([query_text]).tolist()
    results = collection.query(query_embeddings=query_embedding, n_results=5)
    context_text = " ".join(results['documents'][0])

    # Prepare input for the model
    prompt = f"Context: {context_text}\n\nQuestion: {query_text}\n\nAnswer:"

    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=150)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Extract the answer part
    answer = response.split("Answer:")[-1].strip()
    return answer


@app.route('/hello', methods=['GET'])
def hello():
    return jsonify({'message': 'Hello, World!'})


@app.route('/upload', methods=['POST'])
def upload_file():
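    """
    Accepts a multipart video upload, saves it to the upload folder and
    schedules background processing.

    Example request (illustrative, assuming the service listens on localhost:5000):
        curl -X POST -F "file=@lecture.mp4" http://localhost:5000/upload
    """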
    print("Request received.")
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400

    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    if file and allowed_video_file(file.filename):
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)

        if not os.path.exists(file_path):
            print(f"Saving {file.filename} to {file_path}")
            try:
                file.save(file_path)
                scheduler.add_job(func=process_file, args=[file_path], trigger='date',
                                  id=f'process_{filename}')
                return jsonify({'filename': filename}), 200
            except Exception as e:
                return jsonify({'error': str(e)}), 502
        else:
            print(f"We have already processed this file - {filename}. Skipping processing.")
            return jsonify({'filename': filename}), 200
    else:
        return jsonify({'error': 'File type not allowed'}), 400

def process_file(file_path):
    """Background job: run the full processing pipeline for an uploaded video."""
    print(f'Processing file: {file_path}')
    process_video(file_path)
    print('File processed!')


@app.route('/details', methods=['POST'])
def get_details():
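    """
    Returns the saved details JSON for a previously processed file.

    Example request (illustrative, assuming the service listens on localhost:5000):
        curl -X POST -H "Content-Type: application/json" \
             -d '{"filename": "lecture.mp4"}' http://localhost:5000/details
    """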
    data = request.get_json()
    filename = data.get('filename') if data else None
    if not filename:
        return jsonify({'error': 'No filename provided'}), 400
    print(f"Received request for details of filename: {filename}")

    details_json = os.path.join(app.config['UPLOAD_FOLDER'], f"{filename}{DETAILS_SUFFIX}")
    print(f"Details JSON path: {details_json}")
    if os.path.exists(details_json):
        with open(details_json, 'r') as file:
            details = json.load(file)
            return jsonify(details)
    else:
        return jsonify({'error': 'Details not found'}), 404


@app.route('/chat', methods=['POST'])
def chat():
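    """
    Answers a chat message using the retrieval-augmented chatbot.

    Example request (illustrative, assuming the service listens on localhost:5000):
        curl -X POST -F "chat_msg=What topics does the lecture cover?" http://localhost:5000/chat
    """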
    chat_msg = request.form.get('chat_msg')
    if chat_msg:
        print(f"Received chat message: {chat_msg}")
        resp = query_chatbot(chat_msg)
        return jsonify({"status": "success", "response": resp})
    else:
        return jsonify({"status": "error", "message": "No chat message received"}), 400


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)