from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient

# Hugging Face Inference API client for the Mistral-7B-Instruct model.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

app = Flask(__name__)

# Load the mentor list used by the /get_mentor endpoint.
# mentor.txt is assumed to be a plain-text file shipped alongside this script.
file_path = "mentor.txt"
with open(file_path, "r") as file:
    mentors_data = file.read()

@app.route('/')
def home():
    return jsonify({"message": "Welcome to the Recommendation API!"})

def format_prompt(message):
    # Wrap the message in the Mistral [INST] instruction format, with a fixed
    # placeholder exchange as the prior turn.
    user_prompt = "UserPrompt"
    bot_response = "BotResponse"

    return f"<s>[INST] {user_prompt} [/INST] {bot_response}</s> [INST] {message} [/INST]"
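# For example (illustrative), format_prompt("Hello") returns:
#   "<s>[INST] UserPrompt [/INST] BotResponse</s> [INST] Hello [/INST]"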



@app.route('/get_course', methods=['POST'])
def recommend():
    # Generation settings for the course recommendation call.
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    user_degree = content.get('degree')
    user_stream = content.get('stream')
    user_semester = content.get('semester')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    prompt = f"""
    You need to act as a course recommendation engine for a student based on the details below.
    Degree: {user_degree}
    Stream: {user_stream}
    Current Semester: {user_semester}
    Based on the above details, recommend courses that relate to them.
    Note: The output should be a list in the format below:
    [course1, course2, course3,...]
    Return only the answer, not the prompt or any other unnecessary text, and do not add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    # stream=False returns the full generated text as a single string.
    response = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": response})
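# Example request (illustrative values; field names match the handler above):
#   curl -X POST http://127.0.0.1:5000/get_course \
#        -H "Content-Type: application/json" \
#        -d '{"degree": "B.Tech", "stream": "Computer Science", "semester": "4"}'
# The response has the form {"ans": "<model output>"}.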

@app.route('/get_mentor', methods=['POST'])
def mentor():
    # Generation settings for the mentor recommendation call.
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    user_degree = content.get('degree')
    user_stream = content.get('stream')
    user_semester = content.get('semester')
    courses = content.get('courses')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    prompt = f"""
    You need to act as a mentor recommendation engine for a student based on the details below; the list of mentors with their experience is attached.
    Degree: {user_degree}
    Stream: {user_stream}
    Current Semester: {user_semester}
    Courses opted: {courses}
    Mentor list: {mentors_data}
    Based on the above details, recommend the mentors that relate to them.
    Note: The output should be a list in the format below:
    [mentor1, mentor2, mentor3,...]
    Return only the answer, not the prompt or any other unnecessary text, and do not add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    # stream=False returns the full generated text as a single string.
    response = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": response})
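# Example request (illustrative values; "courses" carries the courses the student opted for):
#   curl -X POST http://127.0.0.1:5000/get_mentor \
#        -H "Content-Type: application/json" \
#        -d '{"degree": "B.Tech", "stream": "Computer Science", "semester": "4", "courses": ["Data Structures", "Operating Systems"]}'
# The response has the form {"ans": "<model output>"}.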

if __name__ == '__main__':
    # Start the Flask development server (debug mode is for local use only).
    app.run(debug=True)
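# To try the API locally (assuming Flask and huggingface_hub are installed and
# mentor.txt is present next to this script):
#   python app.py
# The development server listens on http://127.0.0.1:5000 by default.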