from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient


# Hugging Face Inference API client pointed at the hosted Mistral-7B-Instruct model.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)
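
# Note: a gated or rate-limited model may require an access token. A minimal
# sketch, assuming the token is exported via a (hypothetical) HF_TOKEN
# environment variable:
#
#   import os
#   client = InferenceClient(
#       "mistralai/Mistral-7B-Instruct-v0.1",
#       token=os.environ.get("HF_TOKEN"),
#   )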

app = Flask(__name__)

# Load the mentor roster once at startup; it is injected into the /get_mentor prompt.
file_path = "mentor.txt"
with open(file_path, "r") as file:
    mentors_data = file.read()
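
# mentor.txt is read as plain text and pasted verbatim into the /get_mentor
# prompt. A hypothetical example of its contents (the actual format is not
# shown in this file):
#   Dr. A. Rao, 10 years experience, Machine Learning
#   Prof. B. Shah, 7 years experience, Databases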

@app.route('/')
def home():
    return jsonify({"message": "Welcome to the Recommendation API!"})


def format_prompt(message):
    # Wrap the message in Mistral-Instruct chat markers. The closing </s> is
    # intentionally omitted: it is the end-of-sequence token, and appending it
    # before generation can cause the model to stop immediately.
    prompt = "<s>"
    prompt += f"[INST] {message} [/INST]"
    return prompt
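
# Example (illustrative): format_prompt("Recommend three courses") returns
#   "<s>[INST] Recommend three courses [/INST]"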

@app.route('/get_course', methods=['POST'])
def recommend():
    # Fixed sampling parameters (not user-configurable through the API).
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    # Student details from the JSON request body.
    content = request.json
    user_degree = content.get('degree')
    user_stream = content.get('stream')
    user_semester = content.get('semester')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    prompt = f"""
    You need to act as a recommendation engine that suggests courses for a student based on the details below.

    Degree: {user_degree}
    Stream: {user_stream}
    Current Semester: {user_semester}

    Based on the above details, recommend courses that relate to them.
    Note: The output should be valid JSON in the following format:
    {{"course1": "course_name", "course2": "course_name", "course3": "course_name", ...}}
    """
    formatted_prompt = format_prompt(prompt)

    # Stream tokens from the Inference API and collect them into a single
    # string; yielding here would turn the view into a generator and the
    # final jsonify() response would never reach the client.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
    return jsonify({"ans": output})
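
# Example request to /get_course (hypothetical values), assuming the default
# Flask development server on port 5000:
#   curl -X POST http://127.0.0.1:5000/get_course \
#        -H "Content-Type: application/json" \
#        -d '{"degree": "B.Tech", "stream": "Computer Science", "semester": "4"}'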

@app.route('/get_mentor', methods=['POST'])
def mentor():
    # Fixed sampling parameters (not user-configurable through the API).
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    # Student details and chosen courses from the JSON request body.
    content = request.json
    user_degree = content.get('degree')
    user_stream = content.get('stream')
    user_semester = content.get('semester')
    courses = content.get('courses')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    prompt = f"""
    You need to act as a recommendation engine that suggests mentors for a student based on the details below. The list of mentors with their experience is attached.

    Degree: {user_degree}
    Stream: {user_stream}
    Current Semester: {user_semester}
    Courses opted: {courses}

    Mentor list: {mentors_data}
    Based on the above details, recommend mentors that relate to them.
    Note: The output should be valid JSON in the following format:
    {{"mentor1": "mentor_name", "mentor2": "mentor_name", "mentor3": "mentor_name", ...}}
    """
    formatted_prompt = format_prompt(prompt)

    # Stream tokens from the Inference API and collect them into a single
    # string; yielding here would turn the view into a generator and the
    # final jsonify() response would never reach the client.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
    return jsonify({"ans": output})
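
# Example request to /get_mentor (hypothetical values), assuming the default
# Flask development server on port 5000:
#   curl -X POST http://127.0.0.1:5000/get_mentor \
#        -H "Content-Type: application/json" \
#        -d '{"degree": "B.Tech", "stream": "Computer Science", "semester": "4",
#             "courses": ["Data Structures", "Operating Systems"]}'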


if __name__ == '__main__':
    app.run(debug=True)