import gradio as gr
from huggingface_hub import ChatCompletionStreamOutput, InferenceClient

# Fine-tuned maritime legal model (defined for reference; the InferenceClient
# below actually queries the public zephyr-7b-beta endpoint).
MODEL = "nomiChroma3.1"
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
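# To query the fine-tuned model instead (assuming it is hosted on the Hub
# under that repo id), the client could be created as InferenceClient(MODEL).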

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
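    """Yield the assistant's reply incrementally as it streams from the
    Inference API; `history` is the list of (user, assistant) turn tuples
    that gr.ChatInterface passes in.
    """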
    messages = [{"role": "system", "content": system_message}]
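    # Replay prior turns so the model sees the full conversation context.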
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
    response = ""
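    # Streamed chunks may arrive as typed objects, plain dicts, or raw
    # strings depending on the endpoint, so each shape is handled below.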
    try:
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            try:
                if isinstance(chunk, ChatCompletionStreamOutput):
                    content = chunk.choices[0].delta.content
                    if content is not None:
                        response += content
                        yield response
                    if chunk.choices[0].finish_reason == 'stop':
                        break
                elif isinstance(chunk, dict):
                    content = chunk.get('choices', [{}])[0].get('delta', {}).get('content')
                    if content:
                        response += content
                        yield response
                    if chunk.get('choices', [{}])[0].get('finish_reason') == 'stop':
                        break
                elif isinstance(chunk, str):
                    if chunk.strip():  # Only process non-empty strings
                        response += chunk
                        yield response
                else:
                    print(f"Unexpected chunk type: {type(chunk)}")
                    print(f"Chunk content: {chunk}")
            except Exception as e:
                print(f"Error processing chunk: {e}")
                print(f"Problematic chunk: {chunk}")
                continue  # Continue to the next chunk even if there's an error
        
        # Final yield to ensure all content is returned
        if response:
            yield response

    except Exception as e:
        print(f"An error occurred in the main loop: {e}")
        if response:
            yield response
        else:
            yield f"An error occurred: {e}"

# Custom CSS for light blue background and message tiles
custom_css = """
    .gradio-container {
        background-color: #e6f3ff !important;
    }
    .chat-window {
        background-color: #f0f8ff !important;
    }
    .message.user, .message.bot {
        border: 1px solid #cce4ff !important;
        padding: 15px !important;
        border-radius: 8px !important;
    }
    .message.user {
        background-color: #e1f0ff !important;
    }
    .message.bot {
        background-color: #e6f3ff !important;
    }
    .input-box, .output-box {
        background-color: #e6f3ff !important;
        border: 1px solid #cce4ff !important;
    }
    textarea {
        background-color: #e6f3ff !important;
    }
"""

# Gradio interface setup with custom theme
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a maritime legal assistant with expertise strictly in Indian maritime law. Provide detailed legal advice and information based on Indian maritime legal principles and regulations.",
            label="System message"
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    title="Maritime Legal Compliance",
    description="This chatbot uses a fine-tuned LLaMA-3.1 model personalised to assist with Indian maritime legal queries.",
    theme=gr.themes.Soft(
        primary_hue="blue",
        secondary_hue="blue",
        neutral_hue="blue",
    ).set(body_background_fill="#e6f3ff"),
    examples=[
        ["What are the key regulations governing ports in India?"],
        ["Explain the concept of cabotage in Indian maritime law."],
        ["What are the legal requirements for registering a vessel in India?"],
    ],
    css=custom_css,
    cache_examples=False,
)

# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()