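"""Gradio dictionary search app.

Searches the lexie table of the asawal_amqran.db SQLite database across
Amazigh, Arabic, and French fields, ranks matches by priority (exact cell,
whole word, contains, starts with, ends with), and renders results as HTML.
"""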
import gradio as gr
import sqlite3
import unicodedata
from typing import Tuple
import html


def normalize_text(text: str, language: str) -> Tuple[str, str]:
    """Return (original, normalized) forms of the text for the given language."""
    if not text:
        return text, text

    if language == "Arabic":
        # Keep the original form; normalize hamza variants and strip combining
        # marks for the normalized form.
        original_text = text
        text = text.replace('أ', 'ا').replace('إ', 'ا').replace('آ', 'ا')
        normalized_text = ''.join(c for c in unicodedata.normalize('NFKD', text)
                                  if not unicodedata.category(c).startswith('M'))
        return original_text, normalized_text.lower()
    elif language == "French":
        # Strip accents and other combining marks.
        text = ''.join(c for c in unicodedata.normalize('NFKD', text)
                       if not unicodedata.category(c).startswith('M'))
    elif language == "Amazigh":
        # Fold emphatic ⵕ to ⵔ and drop the labialization mark ⵯ.
        text = text.replace('ⵕ', 'ⵔ').replace('ⵯ', '')

    return text.lower(), text.lower()



def search_dictionary(search_term: str,
                     language: str,
                     exact_match: bool,
                     word_match: bool,
                     contains: bool,
                     starts_with: bool,
                     ends_with: bool) -> str:
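    """Query the lexie table for search_term in the selected language's columns.

    Results are collected in priority order (exact cell match, whole-word match,
    contains, starts with, ends with), de-duplicated by word_id, and returned as
    an HTML string for display.
    """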
    if not search_term or search_term.isspace():
        return "<p>Please enter a search term</p>"

    conn = sqlite3.connect('asawal_amqran.db')
    cursor = conn.cursor()

    # Get both original and normalized forms
    original_search, normalized_search = normalize_text(search_term, language)


    search_columns = {
        "Amazigh": ["word", "latin", "construct", "plural", "acc", "accneg", "inacc",
                   "variante", "feminine", "fem_construct", "fem_plural",
                   "fem_plural_construct", "exp_zgh"],
        "Arabic": ["arabic", "exp_ara", "mean_ar"],
        "French": ["french", "exp_fra"]
    }.get(language, [])

    if not search_columns:
        return "<p>Please select a language</p>"

    if not any([exact_match, word_match, contains, starts_with, ends_with]):
        return "<p>Please select at least one search option</p>"

    priority_results = []
    seen_word_ids = set()
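    # Rows are collected as (priority, row) pairs; de-duplicating on word_id keeps
    # only the highest-priority match for each entry, since the tiers run in order.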

    # Priority 1: Exact Match (use original_search)
    if exact_match:
        conditions = [f"LOWER({col}) = ?" for col in search_columns]
        query = f"SELECT * FROM lexie WHERE {' OR '.join(conditions)}"
        params = [original_search.lower()] * len(search_columns)  # Use original, lowercased
        cursor.execute(query, params)
        column_names = [desc[0] for desc in cursor.description]
        word_id_idx = column_names.index('word_id') if 'word_id' in column_names else -1
        for row in cursor.fetchall():
            if word_id_idx != -1:
                word_id = row[word_id_idx]
                if word_id not in seen_word_ids:
                    seen_word_ids.add(word_id)
                    priority_results.append((1, row))

    # Priority 2: Exact Word Match (use normalized_search)
    if word_match:
        # Match the term as a whole word within a cell: the entire cell, or a
        # space-delimited word at the start, end, or middle of the cell.
        conditions = []
        for col in search_columns:
            conditions.extend([
                f"LOWER({col}) = ?",
                f"LOWER({col}) LIKE ?",
                f"LOWER({col}) LIKE ?",
                f"LOWER({col}) LIKE ?"
            ])
        query = f"SELECT * FROM lexie WHERE {' OR '.join(conditions)}"
        params = []
        for _ in search_columns:
            params.extend([
                normalized_search,           # whole cell
                f"{normalized_search} %",    # word at the start
                f"% {normalized_search}",    # word at the end
                f"% {normalized_search} %"   # word in the middle
            ])
        cursor.execute(query, params)
        column_names = [desc[0] for desc in cursor.description]
        word_id_idx = column_names.index('word_id') if 'word_id' in column_names else -1
        for row in cursor.fetchall():
            if word_id_idx != -1:
                word_id = row[word_id_idx]
                if word_id not in seen_word_ids:
                    seen_word_ids.add(word_id)
                    priority_results.append((2, row))

    # Priority 3: Contains (use normalized_search)
    if contains:
        conditions = [f"LOWER({col}) LIKE ?" for col in search_columns]
        query = f"SELECT * FROM lexie WHERE {' OR '.join(conditions)}"
        params = [f"%{normalized_search}%"] * len(search_columns)
        cursor.execute(query, params)
        column_names = [desc[0] for desc in cursor.description]
        word_id_idx = column_names.index('word_id') if 'word_id' in column_names else -1
        for row in cursor.fetchall():
            if word_id_idx != -1:
                word_id = row[word_id_idx]
                if word_id not in seen_word_ids:
                    seen_word_ids.add(word_id)
                    priority_results.append((3, row))

    # Priority 4: Starts With (use normalized_search)
    if starts_with:
        conditions = [f"LOWER({col}) LIKE ?" for col in search_columns]
        query = f"SELECT * FROM lexie WHERE {' OR '.join(conditions)}"
        params = [f"{normalized_search}%"] * len(search_columns)
        cursor.execute(query, params)
        column_names = [desc[0] for desc in cursor.description]
        word_id_idx = column_names.index('word_id') if 'word_id' in column_names else -1
        for row in cursor.fetchall():
            if word_id_idx != -1:
                word_id = row[word_id_idx]
                if word_id not in seen_word_ids:
                    seen_word_ids.add(word_id)
                    priority_results.append((4, row))

    # Priority 5: Ends With (use normalized_search)
    if ends_with:
        conditions = [f"LOWER({col}) LIKE ?" for col in search_columns]
        query = f"SELECT * FROM lexie WHERE {' OR '.join(conditions)}"
        params = [f"%{normalized_search}"] * len(search_columns)
        cursor.execute(query, params)
        column_names = [desc[0] for desc in cursor.description]
        word_id_idx = column_names.index('word_id') if 'word_id' in column_names else -1
        for row in cursor.fetchall():
            if word_id_idx != -1:
                word_id = row[word_id_idx]
                if word_id not in seen_word_ids:
                    seen_word_ids.add(word_id)
                    priority_results.append((5, row))

    # Capture the column names before closing the connection; every query above
    # runs SELECT * on the same table, so one description covers all rows.
    column_names = [desc[0] for desc in cursor.description] if cursor.description else []
    conn.close()

    if not priority_results:
        return "<p>No results found</p>"

    # Sort by priority
    priority_results.sort(key=lambda x: x[0])
    results = [row for priority, row in priority_results]

    # Format results as HTML
    if not column_names:
        return "<p>No data found</p>"

    html_output = "<div style='font-family: Arial, sans-serif;'>"
    for result in results:
        result_dict = dict(zip(column_names, result))

        html_output += "<div style='border: 1px solid #ccc; margin: 10px; padding: 15px; position: relative;'>"

        if 'source' in result_dict and result_dict['source']:
            html_output += f"<div style='text-align: center; font-style: italic;'>{html.escape(result_dict['source'])}</div>"
        if 'category' in result_dict and result_dict['category']:
            html_output += f"<div style='position: absolute; top: 10px; right: 10px; font-weight: bold;'>{html.escape(result_dict['category'])}</div>"

        html_output += "<h3>Word</h3><ul>"
        for field, label in [
            ('word', 'Word'), ('latin', 'Latin'), ('construct', 'Construct'),
            ('plural', 'Plural'), ('acc', 'Accusative'), ('accneg', 'Negative Accusative'),
            ('inacc', 'Inaccusative'), ('variante', 'Variant'), ('feminine', 'Feminine'),
            ('fem_construct', 'Feminine Construct'), ('fem_plural', 'Feminine Plural'),
            ('fem_plural_construct', 'Feminine Plural Construct')
        ]:
            if field in result_dict and result_dict[field]:
                html_output += f"<li><strong>{label}:</strong> {html.escape(result_dict[field])}</li>"
        html_output += "</ul>"

        html_output += "<h3>Translations</h3><ul>"
        if 'french' in result_dict and result_dict['french']:
            html_output += f"<li><strong>French:</strong> {html.escape(result_dict['french'])}</li>"
        if 'arabic' in result_dict and result_dict['arabic']:
            html_output += f"<li><strong>Arabic:</strong> {html.escape(result_dict['arabic'])}</li>"
        if 'mean_ar' in result_dict and result_dict['mean_ar']:
            html_output += f"<li><strong>Arabic Meaning:</strong> {html.escape(result_dict['mean_ar'])}</li>"
        html_output += "</ul>"

        html_output += "<h3>Expressions</h3><ul>"
        for field, label in [
            ('exp_zgh', 'Amazigh Expression'), ('exp_fra', 'French Expression'),
            ('exp_ara', 'Arabic Expression')
        ]:
            if field in result_dict and result_dict[field]:
                html_output += f"<li><strong>{label}:</strong> {html.escape(result_dict[field])}</li>"
        html_output += "</ul>"

        html_output += "</div>"
    html_output += "</div>"
    return html_output


# Gradio interface
with gr.Blocks(title="Dictionary Search") as demo:
    gr.Markdown("# Dictionary Search")

    with gr.Row():
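        # Left column: search box and options; right column: rendered HTML results.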
        with gr.Column(scale=1):
            search_input = gr.Textbox(label="Search Term", placeholder="Enter search term...")
            search_button = gr.Button("Search")

            gr.Markdown("### Language Options")
            language = gr.Radio(
                choices=["Amazigh", "Arabic", "French"],
                label="Select Language",
                value="Arabic"
            )

            gr.Markdown("### Search Options")
            exact_match = gr.Checkbox(label="Exact Match (whole cell)", value=True)
            word_match = gr.Checkbox(label="Exact Word Match (within cell)", value=True)
            contains = gr.Checkbox(label="Contains", value=True)
            starts_with = gr.Checkbox(label="Starts With", value=False)
            ends_with = gr.Checkbox(label="Ends With", value=False)

        with gr.Column(scale=3):
            output = gr.HTML(label="Results")
    search_params = [search_input, language, exact_match, word_match, contains, starts_with, ends_with]
    search_input.submit(
        search_dictionary,
        inputs=search_params,
        outputs=output
    )
    search_button.click(
        search_dictionary,
        inputs=search_params,
        outputs=output
    )

demo.launch()