NavyaNayer committed
Commit 7c6f301 · verified · 1 Parent(s): e31187e

Upload 25 files
.env ADDED
@@ -0,0 +1 @@
+ TOGETHER_API_KEY=tgp_v1_ZtXpkMMiL0mcxIemzOwQgXn53Oc5Z7UvEwkusgTqtXQ
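At runtime this key presumably reaches the scripts through the environment; a minimal loading sketch, assuming the python-dotenv package is installed (the snippet itself is illustrative, not part of the commit):

# Minimal sketch, assuming python-dotenv; not part of the committed code
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory into os.environ
api_key = os.getenv("TOGETHER_API_KEY")
if api_key is None:
    raise RuntimeError("TOGETHER_API_KEY is not set; add it to .env")

Note that the .gitignore added in this commit lists .env, which keeps this file out of future commits.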
.gitattributes CHANGED
@@ -1,35 +1,4 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ classification_report.png filter=lfs diff=lfs merge=lfs -text
+ confusion_matrix.png filter=lfs diff=lfs merge=lfs -text
+ precision_recall_curve.png filter=lfs diff=lfs merge=lfs -text
+ ui.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,15 @@
+ __pycache__/
+ *.pyc
+ .env
+ .env.local
+ .env.*.local
+ .vscode/
+ .idea/
+ .DS_Store
+
+ # Model files
+ saved_model/
+ *.pth
+ intent_classifier.pth
+
+
classification_report.png ADDED

Git LFS Details

  • SHA256: 90b836680c7692c8b64daecffb6521bb0b0b177df880cf4a3b61bf9c8e28c6d1
  • Pointer size: 131 Bytes
  • Size of remote file: 601 kB
complexity_Score_finetuned.py ADDED
@@ -0,0 +1,273 @@
+ import torch
+ import random
+ import numpy as np
+ from tqdm import tqdm
+ from datasets import load_dataset
+ from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification
+ from torch.utils.data import DataLoader
+ from torch.optim import AdamW  # transformers.AdamW is deprecated; use the PyTorch implementation
+ from sklearn.metrics import r2_score, f1_score, mean_absolute_error
+
+ # Set random seeds for reproducibility
+ torch.manual_seed(42)
+ np.random.seed(42)
+ random.seed(42)
+
+ # Load the DEITA-Complexity dataset
+ dataset = load_dataset("hkust-nlp/deita-complexity-scorer-data")
+ val_data = dataset["validation"]
+
+ # Initialize the tokenizer
+ tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
+
+ # Preprocessing function
+ def preprocess_function(examples):
+     return tokenizer(examples["input"], truncation=True, padding="max_length", max_length=128)
+
+ # Tokenize the validation dataset
+ val_encodings = val_data.map(preprocess_function, batched=True)
+
+ # Inspect the structure of val_encodings
+ print("Validation Encodings Structure:")
+ print(val_encodings)
+
+ # Wrap the encodings in a PyTorch Dataset
+ class ComplexityDataset(torch.utils.data.Dataset):
+     def __init__(self, encodings):
+         self.encodings = encodings
+
+     def __len__(self):
+         return len(self.encodings["input_ids"])
+
+     def __getitem__(self, idx):
+         return {
+             "input_ids": torch.tensor(self.encodings["input_ids"][idx]),
+             "attention_mask": torch.tensor(self.encodings["attention_mask"][idx]),
+             # The dataset stores targets as strings; cast to float for regression
+             "labels": torch.tensor(float(self.encodings["target"][idx]), dtype=torch.float),
+         }
+
+ val_dataset = ComplexityDataset(val_encodings)
+
+ # Load the pre-trained DistilBERT model with a single regression head
+ model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=1)
+
+ # Freeze the first 4 transformer layers
+ for layer in model.distilbert.transformer.layer[:4]:
+     for param in layer.parameters():
+         param.requires_grad = False
+
+ # Define the optimizer
+ optimizer = AdamW(model.parameters(), lr=2e-5)
+
+ # Use GPU if available
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+
+ # DataLoader for batching
+ val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)
+
+ # Evaluation function
+ def evaluate_model(model, val_loader):
+     model.eval()
+     val_loss = 0.0
+     total_mae = 0.0
+     all_predictions = []
+     all_labels = []
+
+     with torch.no_grad():
+         for batch in tqdm(val_loader, desc="Evaluating", leave=False):
+             batch = {key: val.to(device) for key, val in batch.items()}
+             outputs = model(**batch)
+             # squeeze(-1) keeps the batch dimension even when the last batch has one sample
+             preds = outputs.logits.squeeze(-1)
+             loss = torch.nn.functional.mse_loss(preds, batch["labels"])
+
+             val_loss += loss.item()
+             total_mae += torch.nn.functional.l1_loss(preds, batch["labels"], reduction="sum").item()
+
+             all_predictions.extend(preds.cpu().numpy())
+             all_labels.extend(batch["labels"].cpu().numpy())
+
+     avg_val_loss = val_loss / len(val_loader)
+     avg_val_mae = total_mae / len(val_loader.dataset)
+
+     # Additional metrics: R² on the raw scores, weighted F1 on the rounded scores
+     r2 = r2_score(all_labels, all_predictions)
+     f1 = f1_score(np.round(all_labels), np.round(all_predictions), average="weighted")
+
+     return avg_val_loss, avg_val_mae, r2, f1, all_predictions, all_labels
+
+ # Evaluate the model
+ val_loss, val_mae, r2, f1, predictions, labels = evaluate_model(model, val_loader)
+
+ print(f"Validation Loss = {val_loss:.4f}, Validation MAE = {val_mae:.4f}, R² Score = {r2:.4f}, F1 Score = {f1:.4f}")
+
+ # Testing the model (inference on the validation set)
+ def test_model(model, val_loader):
+     model.eval()
+     all_predictions = []
+     all_labels = []
+
+     with torch.no_grad():
+         for batch in tqdm(val_loader, desc="Testing", leave=False):
+             batch = {key: val.to(device) for key, val in batch.items()}
+             outputs = model(**batch)
+
+             all_predictions.extend(outputs.logits.squeeze(-1).cpu().numpy())
+             all_labels.extend(batch["labels"].cpu().numpy())
+
+     return np.array(all_predictions), np.array(all_labels)
+
+ # Get predictions and labels from the test function
+ test_predictions, test_labels = test_model(model, val_loader)
+
+ # Evaluation metrics on the test predictions
+ test_r2 = r2_score(test_labels, test_predictions)
+ test_f1 = f1_score(np.round(test_labels), np.round(test_predictions), average="weighted")
+
+ print(f"Test R² Score = {test_r2:.4f}, Test F1 Score = {test_f1:.4f}")
+
+ # Save the model and tokenizer
+ model.save_pretrained("fine_tuned_deita_model")
+ tokenizer.save_pretrained("fine_tuned_deita_model")
+
+ print("✅ Evaluation and testing complete! Model saved at 'fine_tuned_deita_model'.")
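As committed, the script evaluates and saves the model without ever running a training step, despite the "finetuned" name. A minimal fine-tuning sketch, assuming a train_loader built from dataset["train"] with the same ComplexityDataset wrapping as val_loader (the loop and the train_loader name are illustrative, not part of the commit):

# Hypothetical training loop; train_loader would wrap ComplexityDataset over dataset["train"]
model.train()
for epoch in range(3):
    for batch in train_loader:
        batch = {k: v.to(device) for k, v in batch.items()}
        optimizer.zero_grad()
        logits = model(input_ids=batch["input_ids"],
                       attention_mask=batch["attention_mask"]).logits.squeeze(-1)
        loss = torch.nn.functional.mse_loss(logits, batch["labels"])  # regression on the single logit
        loss.backward()
        optimizer.step()
    print(f"epoch {epoch + 1}: last batch loss {loss.item():.4f}")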
complexity_score.py ADDED
@@ -0,0 +1,41 @@
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+
+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained("thethinkmachine/Maxwell-Task-Complexity-Scorer-v0.2")
+ model = AutoModelForSequenceClassification.from_pretrained("thethinkmachine/Maxwell-Task-Complexity-Scorer-v0.2")
+
+ # Example task
+ task_description = "find a new theory"
+
+ # Tokenize the input
+ inputs = tokenizer(task_description, return_tensors="pt")
+
+ # Perform inference
+ with torch.no_grad():
+     outputs = model(**inputs)
+     complexity_score = torch.sigmoid(outputs.logits).item()
+
+ print(f"Task Complexity Score: {complexity_score:.4f}")
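For scoring several tasks at once, the same model can be run over a padded batch; a small sketch that reuses the tokenizer and model loaded above, assuming (as the script does) that the model emits a single logit per input:

# Hypothetical batched scoring; run after the script above
tasks = ["write a haiku", "prove the Riemann hypothesis", "sort a list"]
batch = tokenizer(tasks, return_tensors="pt", padding=True, truncation=True)
with torch.no_grad():
    scores = torch.sigmoid(model(**batch).logits).squeeze(-1)  # one score per task
for task, score in zip(tasks, scores.tolist()):
    print(f"{task}: {score:.4f}")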
confusion_matrix.png ADDED

Git LFS Details

  • SHA256: 820a678c410782c280c48235ae92761fdc593969007bfc44a5aa809c26481db5
  • Pointer size: 131 Bytes
  • Size of remote file: 207 kB
download_models.py ADDED
@@ -0,0 +1,19 @@
+ import os
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ def download_base_models():
+     # Create the models directory
+     os.makedirs("pretrained_models", exist_ok=True)
+
+     print("Downloading BERT base model...")
+     # Download the BERT base model and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+     model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
+
+     # Save them locally
+     tokenizer.save_pretrained("pretrained_models/bert-base-uncased")
+     model.save_pretrained("pretrained_models/bert-base-uncased")
+     print("Base models downloaded successfully!")
+
+ if __name__ == "__main__":
+     download_base_models()
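Once cached this way, later scripts can load the checkpoint from the local directory instead of the Hub; a short sketch under that assumption:

# Hypothetical offline load from the directory created by download_models.py
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("pretrained_models/bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("pretrained_models/bert-base-uncased")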
emotions.py ADDED
@@ -0,0 +1,235 @@
+ import pandas as pd
+ import torch
+ from datasets import load_dataset, Dataset
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
+ import numpy as np
+ from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, classification_report
+
+ # Load the GoEmotions dataset
+ dataset = load_dataset("go_emotions")
+
+ # Print dataset columns
+ print("Dataset Columns Before Preprocessing:", dataset["train"].column_names)
+
+ # Ensure the label column exists
+ if "labels" not in dataset["train"].column_names:
+     raise KeyError("Column 'labels' is missing! Check dataset structure.")
+
+ # Load the tokenizer
+ model_checkpoint = "distilbert-base-uncased"
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
+
+ # Preprocessing: keep only the first label, turning the multi-label task into single-label classification
+ def preprocess_data(batch):
+     encoding = tokenizer(batch["text"], padding="max_length", truncation=True)
+     encoding["labels"] = batch["labels"][0] if batch["labels"] else 0  # Default to 0 if empty
+     return encoding
+
+ # Tokenize the dataset
+ encoded_dataset = dataset.map(preprocess_data, batched=False, remove_columns=["text"])
+
+ # Set format for PyTorch
+ encoded_dataset.set_format("torch")
+
+ # Load the model for single-label classification (28 classes in GoEmotions)
+ num_labels = 28
+ model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)
+
+ # Training arguments
+ args = TrainingArguments(
+     output_dir="./results",
+     eval_strategy="epoch",
+     save_strategy="epoch",
+     save_total_limit=1,
+     logging_strategy="no",
+     per_device_train_batch_size=32,  # Larger batch size for throughput
+     per_device_eval_batch_size=32,
+     num_train_epochs=2,  # Short run
+     weight_decay=0.01,
+     load_best_model_at_end=True,
+     fp16=True,  # Mixed precision for speedup
+     gradient_accumulation_steps=2,  # Effective train batch size of 64
+ )
+
+ # Metrics for the Trainer
+ def compute_metrics(eval_pred):
+     logits, labels = eval_pred
+     predictions = np.argmax(logits, axis=-1)  # Convert logits to class predictions
+     accuracy = accuracy_score(labels, predictions)
+     f1 = f1_score(labels, predictions, average="weighted")
+     return {"accuracy": accuracy, "f1": f1}
+
+ # Initialize the Trainer
+ trainer = Trainer(
+     model=model,
+     args=args,
+     train_dataset=encoded_dataset["train"],
+     eval_dataset=encoded_dataset["validation"],
+     compute_metrics=compute_metrics,
+ )
+
+ # Train the model
+ trainer.train()
+ print("Training completed!")
+
+ # Save model and tokenizer
+ model.save_pretrained("./saved_model")
+ tokenizer.save_pretrained("./saved_model")
+ print("Model and tokenizer saved!")
+
+ # ====== Evaluation on Test Set ======
+ print("\nEvaluating model on test set...")
+
+ # Get the test dataset
+ test_dataset = encoded_dataset["test"]
+
+ # Make predictions
+ predictions = trainer.predict(test_dataset)
+ logits = predictions.predictions
+
+ # Convert logits to class predictions
+ y_pred = np.argmax(logits, axis=-1)
+ y_true = test_dataset["labels"].numpy()
+
+ # Compute accuracy and F1-score
+ accuracy = accuracy_score(y_true, y_pred)
+ f1 = f1_score(y_true, y_pred, average="weighted")
+
+ # Print evaluation results
+ print("\nEvaluation Results:")
+ print(f"Test Accuracy: {accuracy:.4f}")
+ print(f"Test F1 Score: {f1:.4f}")
+
+ # Print the classification report
+ print("\nClassification Report:\n", classification_report(y_true, y_pred))
+
+ # Save test results
+ pd.DataFrame({"true_labels": y_true.tolist(), "predicted_labels": y_pred.tolist()}).to_csv("test_results.csv", index=False)
+ print("Test results saved to 'test_results.csv'!")
emotions.txt ADDED
@@ -0,0 +1,140 @@
+ PS C:\Users\NAVYA\Documents\moodify> python emotions.py
+ 2025-02-26 20:38:46.440320: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+ 2025-02-26 20:38:47.658979: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+ WARNING:tensorflow:From C:\Users\NAVYA\AppData\Local\Programs\Python\Python311\Lib\site-packages\tf_keras\src\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.
+
+ Dataset Columns Before Preprocessing: ['text', 'labels', 'id']
+ Map: 100%|█████████████████████████████████████████████████████████████████████████████████████| 43410/43410 [00:22<00:00, 1958.97 examples/s]
+ Map: 100%|███████████████████████████████████████████████████████████████████████████████████████| 5426/5426 [00:03<00:00, 1796.32 examples/s]
+ Map: 100%|███████████████████████████████████████████████████████████████████████████████████████| 5427/5427 [00:02<00:00, 1936.32 examples/s]
+ Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.bias', 'pre_classifier.weight']
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+ {'eval_loss': 1.414624571800232, 'eval_accuracy': 0.5748249170659786, 'eval_f1': 0.55625264544128, 'eval_runtime': 37.1848, 'eval_samples_per_second': 145.92, 'eval_steps_per_second': 4.572, 'epoch': 1.0}
+ {'eval_loss': 1.3568519353866577, 'eval_accuracy': 0.5895687430888316, 'eval_f1': 0.5727110766843768, 'eval_runtime': 38.7582, 'eval_samples_per_second': 139.996, 'eval_steps_per_second': 4.386, 'epoch': 2.0}
+ {'train_runtime': 6368.0108, 'train_samples_per_second': 13.634, 'train_steps_per_second': 0.213, 'train_loss': 1.50392983585684, 'epoch': 2.0}
+ 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 1356/1356 [1:46:08<00:00, 4.70s/it]
+ Training completed!
+ Model and tokenizer saved!
+
+ Evaluating model on test set...
+ 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████| 170/170 [00:38<00:00, 4.43it/s]
+
+ Evaluation Results:
+ Test Accuracy: 0.5779
+ Test F1 Score: 0.5608
+ C:\Users\NAVYA\AppData\Local\Programs\Python\Python311\Lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
+   _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
+ C:\Users\NAVYA\AppData\Local\Programs\Python\Python311\Lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
+   _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
+ C:\Users\NAVYA\AppData\Local\Programs\Python\Python311\Lib\site-packages\sklearn\metrics\_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
+   _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
+
+ Classification Report:
+                precision    recall  f1-score   support
+
+            0       0.65      0.74      0.69       504
+            1       0.73      0.86      0.79       252
+            2       0.47      0.47      0.47       197
+            3       0.32      0.20      0.25       286
+            4       0.54      0.35      0.42       318
+            5       0.46      0.40      0.43       114
+            6       0.47      0.39      0.43       139
+            7       0.43      0.61      0.51       233
+            8       0.60      0.42      0.49        74
+            9       0.38      0.22      0.28       127
+           10       0.42      0.37      0.39       220
+           11       0.48      0.40      0.44        84
+           12       0.71      0.40      0.51        30
+           13       0.48      0.39      0.43        84
+           14       0.59      0.70      0.64        74
+           15       0.84      0.83      0.83       288
+           16       0.00      0.00      0.00         6
+           17       0.52      0.56      0.54       116
+           18       0.65      0.82      0.72       169
+           19       0.00      0.00      0.00        16
+           20       0.56      0.49      0.52       120
+           21       0.00      0.00      0.00         8
+           22       0.47      0.08      0.14       109
+           23       0.00      0.00      0.00         7
+           24       0.57      0.74      0.64        46
+           25       0.55      0.47      0.51       108
+           26       0.42      0.48      0.44        92
+           27       0.60      0.71      0.65      1606
+
+     accuracy                           0.58      5427
+    macro avg       0.46      0.43      0.44      5427
+ weighted avg       0.56      0.58      0.56      5427
+
+ Test results saved to 'test_results.csv'!
+ PS C:\Users\NAVYA\Doc
intent_classifier.py ADDED
@@ -0,0 +1,102 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.utils.data import Dataset, DataLoader
+ from transformers import BertTokenizer, BertForSequenceClassification
+ from datasets import load_dataset
+ from tqdm import tqdm
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+ # Check for CUDA
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(device)
+
+ # Load the CLINC-OOS dataset ("plus" config)
+ dataset = load_dataset("clinc_oos", "plus")
+
+ # Tokenizer
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+
+ # Preprocess the dataset
+ class IntentDataset(Dataset):
+     def __init__(self, dataset_split):
+         self.texts = dataset_split["text"]
+         self.labels = dataset_split["intent"]
+         # Sort the label set so the mapping is deterministic and identical across splits
+         self.label_map = {label: i for i, label in enumerate(sorted(set(self.labels)))}
+
+     def __len__(self):
+         return len(self.texts)
+
+     def __getitem__(self, idx):
+         inputs = tokenizer(self.texts[idx], padding="max_length", truncation=True, max_length=64, return_tensors="pt")
+         label = self.labels[idx]
+         if label not in self.label_map:
+             raise ValueError(f"Unexpected label {label} found in dataset")  # Debugging step
+         return {key: val.squeeze(0) for key, val in inputs.items()}, torch.tensor(self.label_map[label])
+
+ # Create the dataloaders
+ batch_size = 16
+ train_dataset = IntentDataset(dataset["train"])
+ test_dataset = IntentDataset(dataset["test"])
+ train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+ test_loader = DataLoader(test_dataset, batch_size=batch_size)
+
+ # Load the pretrained BERT model
+ num_labels = len(set(dataset["train"]["intent"]))
+ model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels).to(device)
+
+ # Loss & optimizer
+ criterion = nn.CrossEntropyLoss()
+ optimizer = optim.AdamW(model.parameters(), lr=2e-5)
+
+ # Training loop
+ num_epochs = 3
+ for epoch in range(num_epochs):
+     model.train()
+     total_loss = 0
+     correct = 0
+     total = 0
+
+     for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs} Training"):
+         inputs, labels = batch
+         inputs = {key: val.to(device) for key, val in inputs.items()}
+         labels = labels.to(device)
+
+         optimizer.zero_grad()
+         outputs = model(**inputs).logits
+         loss = criterion(outputs, labels)
+         loss.backward()
+         optimizer.step()
+
+         total_loss += loss.item()
+         correct += (outputs.argmax(dim=1) == labels).sum().item()
+         total += labels.size(0)
+
+     train_accuracy = correct / total
+     print(f"Epoch {epoch+1}/{num_epochs}, Loss: {total_loss:.4f}, Train Accuracy: {train_accuracy:.4f}")
+
+ # Evaluation on the test set
+ model.eval()
+ all_preds, all_labels = [], []
+
+ with torch.no_grad():
+     for batch in tqdm(test_loader, desc="Testing"):
+         inputs, labels = batch
+         inputs = {key: val.to(device) for key, val in inputs.items()}
+         labels = labels.to(device)
+
+         outputs = model(**inputs).logits
+         preds = outputs.argmax(dim=1)
+
+         all_preds.extend(preds.cpu().numpy())
+         all_labels.extend(labels.cpu().numpy())
+
+ # Compute metrics
+ accuracy = accuracy_score(all_labels, all_preds)
+ precision, recall, f1, _ = precision_recall_fscore_support(all_labels, all_preds, average="weighted")
+
+ print(f"Test Accuracy: {accuracy:.4f}")
+ print(f"Precision: {precision:.4f}, Recall: {recall:.4f}, F1-score: {f1:.4f}")
+
+ # Save the model weights
+ torch.save(model.state_dict(), "intent_classifier.pth")
intent_graphs.py ADDED
@@ -0,0 +1,88 @@
+ import torch
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve
+ from sklearn.preprocessing import label_binarize
+ from transformers import BertTokenizer, BertForSequenceClassification
+ from datasets import load_dataset
+
+ # Check for CUDA
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load the dataset
+ dataset = load_dataset("clinc_oos", "plus")
+ label_names = dataset["train"].features["intent"].names  # Ensure correct order
+
+ # Load the model
+ num_labels = len(label_names)
+ model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels)
+ model.load_state_dict(torch.load("intent_classifier.pth", map_location=device))
+ model.to(device)
+ model.eval()
+
+ # Load the tokenizer
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+
+ # Collect predictions over the test set
+ true_labels = []
+ pred_labels = []
+ all_probs = []
+
+ for example in dataset["test"]:
+     sentence = example["text"]
+     true_label = example["intent"]
+
+     # Tokenize
+     inputs = tokenizer(sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128)
+     inputs = {key: val.to(device) for key, val in inputs.items()}
+
+     # Predict
+     with torch.no_grad():
+         outputs = model(**inputs)
+         probs = torch.nn.functional.softmax(outputs.logits, dim=1).cpu().numpy()[0]
+         predicted_class = np.argmax(probs)
+
+     # Store results
+     true_labels.append(true_label)
+     pred_labels.append(predicted_class)
+     all_probs.append(probs)
+
+ # Convert to numpy arrays
+ true_labels = np.array(true_labels)
+ pred_labels = np.array(pred_labels)
+ all_probs = np.array(all_probs)
+
+ # Compute the confusion matrix
+ conf_matrix = confusion_matrix(true_labels, pred_labels)
+
+ # Plot the confusion matrix
+ plt.figure(figsize=(12, 10))
+ sns.heatmap(conf_matrix, annot=False, fmt="d", cmap="Blues")
+ plt.xlabel("Predicted Label")
+ plt.ylabel("True Label")
+ plt.title("Confusion Matrix for Intent Classification")
+ plt.savefig("confusion_matrix.png", dpi=300, bbox_inches="tight")
+ plt.close()
+
+ print("Confusion matrix saved as confusion_matrix.png")
+
+ # --- Multi-Class Precision-Recall Curve ---
+ # Binarize true labels for the one-vs-rest PR calculation
+ true_labels_bin = label_binarize(true_labels, classes=np.arange(num_labels))
+
+ # Plot a precision-recall curve per class
+ plt.figure(figsize=(10, 8))
+ for i in range(num_labels):
+     precision, recall, _ = precision_recall_curve(true_labels_bin[:, i], all_probs[:, i])
+     plt.plot(recall, precision, lw=1, alpha=0.7, label=f"Class {i}: {label_names[i]}")
+
+ plt.xlabel("Recall")
+ plt.ylabel("Precision")
+ plt.title("Multi-Class Precision-Recall Curve")
+ plt.legend(loc="best", fontsize=6, ncol=2, frameon=True)
+ plt.grid(True)
+ plt.savefig("precision_recall_curve.png", dpi=300, bbox_inches="tight")
+ plt.close()
+
+ print("Precision-Recall curve saved as precision_recall_curve.png")
intent_train.txt ADDED
@@ -0,0 +1,25 @@
+ PS C:\Users\NAVYA\Documents\moodify> python intent_classifier.py
+ 2025-02-26 00:12:11.737923: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+ 2025-02-26 00:12:13.232626: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+ cuda
+ train-00000-of-00001.parquet: 100%|████████████████████████████████████████████████████████████████████████| 312k/312k [00:00<00:00, 2.83MB/s]
+ C:\Users\NAVYA\AppData\Local\Programs\Python\Python311\Lib\site-packages\huggingface_hub\file_download.py:142: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in C:\Users\NAVYA\.cache\huggingface\hub\datasets--clinc_oos. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.
+ To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development
+   warnings.warn(message)
+ validation-00000-of-00001.parquet: 100%|█████████████████████████████████████████████████████████████████| 77.8k/77.8k [00:00<00:00, 4.63MB/s]
+ test-00000-of-00001.parquet: 100%|█████████████████████████████████████████████████████████████████████████| 136k/136k [00:00<00:00, 4.81MB/s]
+ Generating train split: 100%|████████████████████████████████████████████████████████████████| 15250/15250 [00:00<00:00, 210143.07 examples/s]
+ Generating validation split: 100%|█████████████████████████████████████████████████████████████| 3100/3100 [00:00<00:00, 233598.79 examples/s]
+ Generating test split: 100%|███████████████████████████████████████████████████████████████████| 5500/5500 [00:00<00:00, 288149.49 examples/s]
+ Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+ Epoch 1/3 Training: 100%|███████████████████████████████████████████████████████████████████████████████████| 954/954 [04:26<00:00, 3.57it/s]
+ Epoch 1/3, Loss: 3449.6031, Train Accuracy: 0.4677
+ Epoch 2/3 Training: 100%|███████████████████████████████████████████████████████████████████████████████████| 954/954 [04:25<00:00, 3.60it/s]
+ Epoch 2/3, Loss: 1115.7661, Train Accuracy: 0.9301
+ Epoch 3/3 Training: 100%|███████████████████████████████████████████████████████████████████████████████████| 954/954 [04:24<00:00, 3.61it/s]
+ Epoch 3/3, Loss: 324.9103, Train Accuracy: 0.9817
+ Testing: 100%|██████████████████████████████████████████████████████████████████████████████████████████████| 344/344 [00:27<00:00, 12.57it/s]
+ Test Accuracy: 0.8800
+ Precision: 0.8978, Recall: 0.8800, F1-score: 0.8741
+ PS C:\Users\NAVYA\Documents\moodify>
model.py ADDED
@@ -0,0 +1,140 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import numpy as np
+ from transformers import BertTokenizer, BertModel
+ from datasets import load_dataset
+ from sklearn.model_selection import train_test_split
+ from torch.utils.data import Dataset, DataLoader
+ from tqdm import tqdm
+ from sklearn.metrics import accuracy_score, f1_score
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ dataset = load_dataset("go_emotions")
+
+ # Extract text and labels (first 20,000 examples)
+ texts = dataset["train"]["text"][:20000]
+ labels = dataset["train"]["labels"][:20000]
+
+ # Reduce multi-label annotations to a single label (the highest label id; 0 if empty)
+ def fix_labels(labels):
+     labels = [max(label) if label else 0 for label in labels]
+     return torch.tensor(labels, dtype=torch.long)
+
+ labels = fix_labels(labels)
+
+ # Split the dataset
+ train_texts, val_texts, train_labels, val_labels = train_test_split(texts, labels, test_size=0.2, random_state=42)
+
+ # Tokenizer
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+
+ # Tokenize the text
+ def tokenize(texts):
+     return tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
+
+ train_encodings = tokenize(train_texts)
+ val_encodings = tokenize(val_texts)
+ train_encodings = {key: val.to(device) for key, val in train_encodings.items()}
+ val_encodings = {key: val.to(device) for key, val in val_encodings.items()}
+
+ class EmotionDataset(Dataset):
+     def __init__(self, encodings, labels):
+         self.encodings = encodings
+         self.labels = labels
+
+     def __len__(self):
+         return len(self.labels)
+
+     def __getitem__(self, idx):
+         item = {key: val[idx] for key, val in self.encodings.items()}
+         item["labels"] = self.labels[idx]
+         return item
+
+ train_dataset = EmotionDataset(train_encodings, train_labels)
+ val_dataset = EmotionDataset(val_encodings, val_labels)
+
+ train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
+ val_loader = DataLoader(val_dataset, batch_size=16)
+
+ class BertGRUClassifier(nn.Module):
+     def __init__(self, bert_model="bert-base-uncased", hidden_dim=128, num_classes=28):
+         super(BertGRUClassifier, self).__init__()
+         self.bert = BertModel.from_pretrained(bert_model)
+         self.gru = nn.GRU(self.bert.config.hidden_size, hidden_dim, batch_first=True)
+         self.dropout = nn.Dropout(0.3)  # Dropout for regularization
+         self.fc = nn.Linear(hidden_dim, num_classes)
+
+     def forward(self, input_ids, attention_mask):
+         # BERT is frozen; only the GRU and classifier head are trained
+         with torch.no_grad():
+             bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
+         gru_output, _ = self.gru(bert_output.last_hidden_state)
+         output = self.fc(self.dropout(gru_output[:, -1, :]))  # Classify from the last GRU state
+         return output
+
+ model = BertGRUClassifier()
+ model.to(device)
+
+ criterion = nn.CrossEntropyLoss()
+ optimizer = optim.Adam(model.parameters(), lr=2e-5)
+ scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)  # Learning rate scheduler
+
+ def evaluate_model(model, data_loader):
+     model.eval()
+     predictions, true_labels = [], []
+
+     with torch.no_grad():
+         for batch in data_loader:
+             input_ids = batch["input_ids"].to(device)
+             attention_mask = batch["attention_mask"].to(device)
+             labels = batch["labels"].to(device)
+
+             outputs = model(input_ids, attention_mask)
+             preds = torch.argmax(outputs, dim=1).cpu().numpy()
+             predictions.extend(preds)
+             true_labels.extend(labels.cpu().numpy())
+
+     acc = accuracy_score(true_labels, predictions)
+     f1 = f1_score(true_labels, predictions, average='weighted')
+     return acc, f1
+
+ def train_model(model, train_loader, val_loader, epochs=10):
+     for epoch in range(epochs):
+         model.train()
+         total_loss = 0
+
+         for batch in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{epochs}"):
+             input_ids = batch["input_ids"].to(device)
+             attention_mask = batch["attention_mask"].to(device)
+             labels = batch["labels"].to(device)
+
+             optimizer.zero_grad()
+             outputs = model(input_ids, attention_mask)
+             loss = criterion(outputs, labels)
+             loss.backward()
+             optimizer.step()
+
+             total_loss += loss.item()
+
+         scheduler.step()  # Step the scheduler once per epoch
+
+         train_acc, train_f1 = evaluate_model(model, train_loader)
+         val_acc, val_f1 = evaluate_model(model, val_loader)
+         print(f"Epoch {epoch + 1}, Loss: {total_loss / len(train_loader)}, Train Acc: {train_acc:.4f}, Train F1: {train_f1:.4f}, Val Acc: {val_acc:.4f}, Val F1: {val_f1:.4f}")
+
+         # Save the model after each epoch
+         torch.save(model.state_dict(), f"model_epoch_{epoch + 1}.pth")
+
+ train_model(model, train_loader, val_loader)
+
+ # Evaluate on the held-out test split
+ test_texts = dataset["test"]["text"]
+ test_labels = fix_labels(dataset["test"]["labels"])
+ test_encodings = tokenize(test_texts)
+ test_encodings = {key: val.to(device) for key, val in test_encodings.items()}
+ test_dataset = EmotionDataset(test_encodings, test_labels)
+ test_loader = DataLoader(test_dataset, batch_size=16)
+
+ test_acc, test_f1 = evaluate_model(model, test_loader)
+ print(f"Test Accuracy: {test_acc:.4f}, Test F1 Score: {test_f1:.4f}")
mood_classifier.py ADDED
@@ -0,0 +1,92 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from transformers import BertTokenizer, BertForSequenceClassification
+ from datasets import load_dataset
+ from torch.utils.data import DataLoader, Dataset, random_split
+ from tqdm import tqdm
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load the GoEmotions dataset
+ dataset = load_dataset("go_emotions", split="train")
+ dataset = dataset.map(lambda x: {"label": x["labels"][0]})  # Keep the first label (single-label setup)
+
+ # Use a dedicated name for the label vocabulary so the training loop's `labels`
+ # variable does not shadow it inside MoodDataset.__getitem__
+ label_list = sorted(set(dataset["label"]))  # Unique labels, deterministic order
+ num_labels = len(label_list)
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+
+ class MoodDataset(Dataset):
+     def __init__(self, texts, labels):
+         self.texts = texts
+         self.labels = labels
+
+     def __len__(self):
+         return len(self.texts)
+
+     def __getitem__(self, idx):
+         inputs = tokenizer(self.texts[idx], return_tensors="pt", padding="max_length", truncation=True, max_length=128)
+         return {key: val.squeeze(0) for key, val in inputs.items()}, torch.tensor(label_list.index(self.labels[idx]))
+
+ dataset = MoodDataset(dataset["text"], dataset["label"])
+ train_size = int(0.8 * len(dataset))
+ train_set, test_set = random_split(dataset, [train_size, len(dataset) - train_size])
+
+ train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
+ test_loader = DataLoader(test_set, batch_size=32)
+
+ model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels).to(device)
+ optimizer = optim.AdamW(model.parameters(), lr=2e-5)
+ criterion = nn.CrossEntropyLoss()
+
+ num_epochs = 3
+ for epoch in range(num_epochs):
+     model.train()
+     epoch_loss, correct, total = 0, 0, 0
+     preds, labels_list = [], []
+
+     for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs} Training"):
+         optimizer.zero_grad()
+         inputs = {key: val.to(device) for key, val in batch[0].items()}
+         labels = batch[1].to(device)
+
+         outputs = model(**inputs).logits
+         loss = criterion(outputs, labels)
+
+         loss.backward()
+         optimizer.step()
+
+         epoch_loss += loss.item()
+         correct += (outputs.argmax(dim=1) == labels).sum().item()
+         total += labels.size(0)
+
+         preds.extend(outputs.argmax(dim=1).cpu().numpy())
+         labels_list.extend(labels.cpu().numpy())
+
+     train_acc = accuracy_score(labels_list, preds)
+     precision, recall, f1, _ = precision_recall_fscore_support(labels_list, preds, average="weighted")
+
+     print(f"Epoch {epoch+1}: Loss: {epoch_loss:.4f}, Train Acc: {train_acc:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}")
+
+ # **Evaluate on Test Set**
+ model.eval()
+ test_preds, test_labels = [], []
+
+ with torch.no_grad():
+     for batch in tqdm(test_loader, desc="Evaluating on Test Set"):
+         inputs = {key: val.to(device) for key, val in batch[0].items()}
+         labels = batch[1].to(device)
+
+         outputs = model(**inputs).logits
+         test_preds.extend(outputs.argmax(dim=1).cpu().numpy())
+         test_labels.extend(labels.cpu().numpy())
+
+ test_acc = accuracy_score(test_labels, test_preds)
+ precision, recall, f1, _ = precision_recall_fscore_support(test_labels, test_preds, average="weighted")
+
+ print(f"Test Accuracy: {test_acc:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f}, F1-score: {f1:.4f}")
+
+ # Save the model
+ model.save_pretrained("mood_classifier")
precision_recall_curve.png ADDED

Git LFS Details

  • SHA256: 105db3b163f9133dd523c1e8dea090299bd6e17d21206803131ce26a623adcdb
  • Pointer size: 131 Bytes
  • Size of remote file: 808 kB
predict_emotions.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import torch
+ from transformers import BertTokenizer, DistilBertForSequenceClassification
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load the trained model and tokenizer
+ try:
+     model = DistilBertForSequenceClassification.from_pretrained("./saved_model")
+     tokenizer = BertTokenizer.from_pretrained("./saved_model")
+ except Exception as e:
+     print(f"Error loading model or tokenizer: {e}")
+     exit()
+
+ model.to(device)
+ model.eval()
+
+ # Define the sentences
+ sentences = [
+     "I am so happy today!",
+     "This is the worst day ever.",
+     "I feel so loved and appreciated.",
+     "I am really angry right now.",
+     "I am so done cant take this anymore",
+     "i have to finish this report by tomorrow but so tired",
+     "let's do it",
+     "i have got this,, yayyyy",
+     "energetic",
+     "worst tired lazy",
+     "I am feeling very sad and lonely."
+ ]
+
+ # Define the label names
+ label_names = ["admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity", "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear", "gratitude", "grief", "joy", "love", "nervousness", "optimism", "pride", "realization", "relief", "remorse", "sadness", "surprise", "neutral"]
+
+ def predict_emotion(sentence):
+     inputs = tokenizer(sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128)
+     inputs = {key: val.to(device) for key, val in inputs.items() if key != "token_type_ids"}
+
+     with torch.no_grad():
+         outputs = model(**inputs)
+         predicted_class = torch.argmax(outputs.logits, dim=1).cpu().numpy()[0]
+
+     return predicted_class, label_names[predicted_class]
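+ # With this label order, a well-trained checkpoint would typically map
+ # "I am so happy today!" to (17, "joy"); exact outputs depend on the model.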
+
+ # Predict emotions for the sentences
+ for sentence in sentences:
+     predicted_emotion, predicted_label_name = predict_emotion(sentence)
+     print(f"Predicted emotion for '{sentence}': {predicted_emotion} ({predicted_label_name})")
predict_intent.py ADDED
@@ -0,0 +1,48 @@
+ import torch
+ from transformers import BertTokenizer, BertForSequenceClassification
+ from datasets import load_dataset
+ from collections import Counter
+
+ # Check for CUDA
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load dataset and get correct label names
+ dataset = load_dataset("clinc_oos", "plus")
+ label_names = dataset["train"].features["intent"].names  # Ensure correct order
+
+ # Debugging check
+ print(f"Total labels: {len(label_names)}")  # Should print 151
+ print("Sample labels:", label_names[:10])  # Print first 10 labels
+
+ # Load the trained model
+ num_labels = len(label_names)  # Should be 151
+ model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels)
+ model.load_state_dict(torch.load("intent_classifier.pth", map_location=device))
+ model.to(device)
+ model.eval()
+
+ # Load tokenizer
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+
+ def predict_intent(sentence):
+     inputs = tokenizer(sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128)
+     inputs = {key: val.to(device) for key, val in inputs.items()}
+
+     with torch.no_grad():
+         outputs = model(**inputs)
+         predicted_class = torch.argmax(outputs.logits, dim=1).cpu().numpy()[0]
+
+     if predicted_class >= len(label_names):  # Prevent out-of-range errors
+         print(f"Warning: Predicted class {predicted_class} is out of range!")
+         return predicted_class, "Unknown Label"
+
+     return predicted_class, label_names[predicted_class]
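+ # Example: for "what's my bank balance" a well-trained CLINC150 model should
+ # return the index of the "balance" intent; out-of-scope inputs map to "oos".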
+
+ # Example usage
+ sentence = "I need to attend a meeting but so tired but important"
+ predicted_intent, predicted_label_name = predict_intent(sentence)
+ print(f"Predicted intent for '{sentence}': {predicted_intent} ({predicted_label_name})")
+
+ # # Fix: Count labels correctly from dataset["train"]
+ # label_counts = Counter([label_names[label] for label in dataset["train"]["intent"]])
+ # print("Label distribution:", label_counts)
requirements.txt CHANGED
@@ -1,3 +1,8 @@
- altair
- pandas
- streamlit
+ together
+ python-dotenv
+ torch
+ transformers==4.35.2
+ tokenizers==0.15.0
+ gdown
+ streamlit
+ pandas
task.py ADDED
@@ -0,0 +1,558 @@
+ import streamlit as st
+ import torch
+ import os
+ from dotenv import load_dotenv
+ from together import Together
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, BertTokenizer, DistilBertTokenizer, BertForSequenceClassification, DistilBertForSequenceClassification
+ from datetime import datetime, timedelta
+ import pandas as pd
+ from task_css import get_custom_css  # Import the custom CSS function
+ import gdown
+
+ # Set environment variable for offline mode
+ os.environ["TRANSFORMERS_OFFLINE"] = "1"
+
+ # Load environment variables
+ load_dotenv()
+
+ # Together AI client with API key from environment variable
+ client = Together(api_key=os.getenv("TOGETHER_API_KEY", ""))
+
+ # Set device
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load Intent Model
+ intent_model_path = "intent_classifier.pth"
+ # Extract file ID from Google Drive URL
+ file_id = "1_GDGvV3MVvBguIsjMyDLg3RxUV_gnFAY"
+ num_intent_labels = 151  # Moved this up before model creation
+
+ # Load Emotion Model
+ emotions_model_path = "./saved_model"
+ emotions_folder_id = "1gYWkbC_XBw_GZjsfwXvubHFil4BCq_gH"
+
+ # Add new pretrained model ID
+ pretrained_folder_id = "13t_EB2LFhRIwb3dkKDtA0O5NXXZBoG-j"
+
+ # Initialize session state
+ if "is_ready" not in st.session_state:
+     st.session_state.is_ready = False
+     st.session_state.models = {}  # Initialize models dict immediately
+     st.session_state.tasks = []
+     st.session_state.task_counter = 0
+     st.session_state.overall_emotion = None
+     st.session_state.overall_emotion_label = "Neutral"
+     st.session_state.editing_task_id = None  # Initialized once so edit state survives reruns
+
+ # Page configuration first
+ st.set_page_config(
+     page_title="🚀 AI Productivity Assistant",
+     layout="wide",
+     page_icon="🎯"
+ )
+
+ # Custom CSS for enhanced styling
+ st.markdown(get_custom_css(), unsafe_allow_html=True)
+
+ # Show loading screen if models aren't ready
+ if not st.session_state.is_ready:
+     st.markdown(
+         """
+         <div class="loading-container" style="text-align: center; padding: 50px;">
+             <div class="loading-spinner"></div>
+             <h2>Setting up your AI assistant...</h2>
+             <p>This may take a minute. We're downloading the required models.</p>
+         </div>
+         """,
+         unsafe_allow_html=True
+     )
+
+     # Load models here
+     try:
+         # First download pretrained models
+         if not os.path.exists("pretrained_models"):
+             with st.status("Downloading base models...", expanded=True) as status:
+                 os.makedirs("pretrained_models", exist_ok=True)
+                 gdown.download_folder(
+                     f"https://drive.google.com/drive/folders/{pretrained_folder_id}",
+                     output="pretrained_models",
+                     quiet=False
+                 )
+                 status.update(label="Base models downloaded!", state="complete")
+
+         # Intent model loading
+         if not os.path.exists(intent_model_path):
+             with st.status("Downloading intent model...", expanded=True) as status:
+                 output = gdown.download(
+                     f"https://drive.google.com/uc?id={file_id}",
+                     intent_model_path,
+                     quiet=False
+                 )
+                 status.update(label="Intent model downloaded!", state="complete")
+
+         # Emotion model loading
+         if not os.path.exists(emotions_model_path):
+             with st.status("Downloading emotion model...", expanded=True) as status:
+                 os.makedirs(emotions_model_path, exist_ok=True)
+                 gdown.download_folder(
+                     f"https://drive.google.com/drive/folders/{emotions_folder_id}",
+                     output=emotions_model_path,
+                     quiet=False
+                 )
+                 status.update(label="Emotion model downloaded!", state="complete")
+
+         # Load and store intent model
+         intent_model = AutoModelForSequenceClassification.from_pretrained(
+             "pretrained_models/bert-base-uncased",
+             num_labels=num_intent_labels,
+             ignore_mismatched_sizes=True,  # Add this parameter
+             local_files_only=True
+         )
+         intent_model.load_state_dict(
+             torch.load(intent_model_path, map_location=device, weights_only=True)
+         )
+         st.session_state.models["intent_model"] = intent_model.to(device).eval()
+         st.session_state.models["intent_tokenizer"] = AutoTokenizer.from_pretrained(
+             "pretrained_models/bert-base-uncased",
+             local_files_only=True
+         )
+
+         # Load and store emotion model
+         emotions_model = AutoModelForSequenceClassification.from_pretrained(
+             emotions_model_path,
+             ignore_mismatched_sizes=True,  # Add this parameter
+             local_files_only=True
+         )
+         st.session_state.models["emotions_model"] = emotions_model.to(device).eval()
+         st.session_state.models["emotions_tokenizer"] = AutoTokenizer.from_pretrained(
+             emotions_model_path,
+             local_files_only=True
+         )
+
+         # Set ready state
+         st.session_state.is_ready = True
+         st.rerun()
+
+     except Exception as e:
+         st.error(f"Error loading models: {str(e)}")
+         st.stop()
+
+ # Only show main app if models are ready
+ if st.session_state.is_ready:
+     # Title with custom styling
+     st.markdown('<div class="main-header">🎯 MoodifyTask: AI Task Prioritization & Wellness Assistant</div>', unsafe_allow_html=True)
+
+     # Emotion labels
+     emotion_label_names = [
+         "admiration", "amusement", "anger", "annoyance", "approval",
+         "caring", "confusion", "curiosity", "desire", "disappointment",
+         "disapproval", "disgust", "embarrassment", "excitement", "fear",
+         "gratitude", "grief", "joy", "love", "nervousness",
+         "optimism", "pride", "realization", "relief", "remorse",
+         "sadness", "surprise", "neutral"
+     ]
+
+     # Emotion categories
+     positive_emotions = ["admiration", "amusement", "approval", "caring", "curiosity", "excitement", "gratitude", "joy", "love", "optimism", "pride", "relief", "surprise"]
+     negative_emotions = ["anger", "annoyance", "disappointment", "disapproval", "disgust", "embarrassment", "fear", "grief", "nervousness", "remorse", "sadness"]
+     neutral_emotions = ["realization", "neutral"]
+
+     # Predict intent
+     def predict_intent(sentence):
+         inputs = st.session_state.models["intent_tokenizer"](
+             sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128
+         )
+         inputs = {key: val.to(device) for key, val in inputs.items()}
+         with torch.no_grad():
+             outputs = st.session_state.models["intent_model"](**inputs)
+             predicted_class = torch.argmax(outputs.logits, dim=1).cpu().numpy()[0]
+
+         # Mapping intent IDs (0-150) to priorities
+         PRIORITY_MAPPING = {
+             5: [8, 35, 42, 74, 97, 110, 118, 120, 124, 136],  # freeze_account, report_lost_card, flight_status, report_fraud, credit_limit, lost_luggage, dispute_charge, overdraft, cancel_reservation, emergency
+             4: [14, 15, 19, 20, 39, 47, 48, 49, 50, 69, 70, 71, 72],  # bill_balance, bill_due, exchange_rate, credit_score, interest_rate, insurance, medical_expenses, appointment_schedule, meeting_schedule, dentist_appointment, doctor_appointment, prescription_refill, pharmacy_hours
+             3: [33, 34, 41, 51, 56, 57, 62, 66, 77, 78, 85],  # hotel_reservation, car_rental, restaurant_reservation, tracking_package, check_in, check_out, traffic_update, directions, smart_home_on, smart_home_off, weather_forecast
+             2: [0, 1, 3, 6, 9, 13, 16, 17, 21, 25, 27, 28, 36, 40, 45, 52, 61],  # restaurant_reviews, shopping_list, what_song, schedule_meeting, translate, play_music, book_hotel, book_flight, gas_prices, exchange_rate, movie_showtimes, recipe, cancel_flight, book_reservation, order_food, car_services, joke
+             1: [2, 4, 5, 7, 10, 11, 12, 18, 22, 23, 24, 26, 30, 31, 32, 37, 38, 43, 44, 46, 53, 54, 55, 58, 59, 60, 63, 64, 65, 67, 68, 73]
+             # tell_joke, fun_fact, trivia, horoscope, dog_fact, cat_fact, define_word, stock_price, sports_update, lottery_results, currency_conversion, holiday_list, language_learning, random_fact, poem, quote, daily_horoscope, joke_request, music_recommendation, podcast_recommendation, celebrity_gossip, movie_recommendation, TV_show_recommendation, book_recommendation, game_recommendation, radio_recommendation, trivia_game, riddle, name_meaning, birthday_reminder, anniversary_reminder, affirmations
+         }
+
+         # Find the priority based on predicted_class
+         predicted_intent_score = next((priority for priority, ids in PRIORITY_MAPPING.items() if predicted_class in ids), 1)  # Default to 1 if not found
+
+         return predicted_intent_score
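+     # Example: an utterance classified as intent id 136 ("emergency" in this
+     # mapping) yields priority 5, while ids in the 1-bucket keep the default 1.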
+
+     # Emotion to numeric score mapping
+     EMOTION_MAPPING = {
+         "admiration": 4, "amusement": 3, "anger": 5, "annoyance": 4, "approval": 3,
+         "caring": 4, "confusion": 3, "curiosity": 3, "desire": 4, "disappointment": 4,
+         "disapproval": 4, "disgust": 5, "embarrassment": 4, "excitement": 5, "fear": 5,
+         "gratitude": 3, "grief": 5, "joy": 5, "love": 5, "nervousness": 4,
+         "optimism": 4, "pride": 4, "realization": 3, "relief": 3, "remorse": 4,
+         "sadness": 5, "surprise": 3, "neutral": 3
+     }
+
+     # Function to get numeric emotion score
+     def get_emotion_score(emotion):
+         return EMOTION_MAPPING.get(emotion.lower(), 3)  # Default to 3 if not found
+
+     # Predict emotion
+     def predict_emotion(sentence):
+         if not sentence.strip():
+             return 3, "neutral"
+         # Ensure the input is a full sentence
+         if len(sentence.split()) == 1:
+             sentence = f"I feel {sentence}"
+         inputs = st.session_state.models["emotions_tokenizer"](
+             sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128
+         )
+         inputs = {key: val.to(device) for key, val in inputs.items() if key != "token_type_ids"}
+
+         with torch.no_grad():
+             outputs = st.session_state.models["emotions_model"](**inputs)
+             predicted_class = torch.argmax(outputs.logits, dim=1).cpu().numpy()[0]
+
+         detected_emotion = emotion_label_names[predicted_class]
+
+         # Manually adjust for stress/pressure-related words
+         stress_keywords = ["stress", "stressed", "overwhelmed", "pressure", "tense", "burnout"]
+         if any(word in sentence.lower() for word in stress_keywords):
+             if detected_emotion not in ["sadness", "nervousness"]:
+                 detected_emotion = "nervousness"  # Change to "sadness" if you prefer
+
+         emotion_score = get_emotion_score(detected_emotion)
+         if emotion_score is None:
+             emotion_score = 3  # Default neutral score
+
+         return emotion_score, detected_emotion
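+     # Example of the keyword override above: "I am stressed about this deadline"
+     # is re-labeled "nervousness" even if the classifier itself picks, say, "fear".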
+
+
+     # Get emotion category
+     def get_emotion_category(emotion):
+         if emotion in positive_emotions:
+             return "positive"
+         elif emotion in negative_emotions:
+             return "negative"
+         else:
+             return "neutral"
+
+
+     def normalize_priority(priority, min_value=0, max_value=10):
+         return (priority - min_value) / (max_value - min_value)  # Normalize between 0-1
+
+     # Calculate task priority
+     def calculate_priority_score(predicted_intent_score, emotion_score, emotion, time_remaining, complexity, emotion_category):
+         """
+         Calculate an adaptive priority score for tasks based on intent, emotion, time urgency, and complexity.
+         """
+         emotion_score = emotion_score if emotion_score is not None else 3
+         # Normalize time urgency (scale 0 to 1 based on 7 days)
+         time_score = max(0, min(1, 1 - (time_remaining.total_seconds() / (7 * 24 * 3600))))
+
+         # Set emotion-based adjustments
+         stress_emotions = ["nervousness", "sadness", "fear"]
+         frustration_emotions = ["anger", "frustration", "disappointment", "annoyance"]
+         anxiety_emotions = ["anxiety", "uncertainty"]
+
+         if emotion_category == "negative":
+             if emotion in stress_emotions:
+                 # Prioritize easy, quick tasks to reduce cognitive load
+                 priority = (predicted_intent_score * 0.15) + (emotion_score * 0.1) + (time_score * 0.3) + ((10 - complexity) * 0.45)
+
+             elif emotion in frustration_emotions:
+                 # Prioritize engaging tasks (not too easy) but keep urgency in mind
+                 priority = (predicted_intent_score * 0.2) + (emotion_score * 0.15) + (time_score * 0.25) + (complexity * 0.4)
+
+             elif emotion in anxiety_emotions:
+                 # Prioritize urgent, low-complexity tasks
+                 priority = (predicted_intent_score * 0.2) + (emotion_score * 0.1) + (time_score * 0.4) + ((10 - complexity) * 0.3)
+
+             else:
+                 # Default for negative emotions: balance urgency and ease
+                 priority = (predicted_intent_score * 0.2) + (emotion_score * 0.1) + (time_score * 0.3) + ((10 - complexity) * 0.4)
+
+         elif emotion_category == "positive":
+             # If the user is in a good mood, favor challenging, high-impact tasks
+             priority = (predicted_intent_score * 0.35) + (emotion_score * 0.2) + (time_score * 0.25) + (complexity * 0.2)
+
+         else:  # Neutral emotion
+             # Keep a balance between difficulty and urgency
+             priority = (predicted_intent_score * 0.3) + (emotion_score * 0.2) + (time_score * 0.2) + (complexity * 0.3)
+
+         return normalize_priority(priority)  # Keep the score in the 0-1 range
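+     # Worked example: predicted_intent_score=4, emotion_score=5 ("nervousness",
+     # negative category), one day left (time_score ≈ 0.857) and complexity 3 hit
+     # the stress branch: 4*0.15 + 5*0.1 + 0.857*0.3 + 7*0.45 ≈ 4.51, or ≈ 0.45 normalized.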
+
+
+
+
+     # AI-generated plan based on start time
+     from datetime import datetime
+
+     def get_llama_suggestion(emotion, tasks, selected_datetime):
+         """Generate an AI plan based on the full datetime instead of just the time."""
+         # Sort tasks by priority (higher priority first)
+         sorted_tasks = sorted(tasks, key=lambda x: x["priority_score"], reverse=True)
+
+         # Filter tasks based on selected datetime
+         filtered_tasks = [
+             task for task in sorted_tasks
+             if task["due_date_time"] >= selected_datetime
+         ]
+
+         if not filtered_tasks:
+             well_being_prompts = {
+                 "nervousness": "Suggest mindfulness exercises and short relaxation techniques.",
+                 "sadness": "Suggest comforting activities like journaling or light exercise.",
+                 "anger": "Suggest ways to channel frustration productively.",
+                 "joy": "Suggest ways to maintain productivity while feeling good.",
+                 "neutral": "Suggest general relaxation activities like listening to music."
+             }
+             well_being_prompt = f"""
+             The user is feeling {emotion}.
+             They have no tasks scheduled after {selected_datetime.strftime('%B %d, %I:%M %p')}.
+             {well_being_prompts.get(emotion, 'Provide general well-being tips.')}
+             """
+             try:
+                 response = client.chat.completions.create(
+                     messages=[{"role": "user", "content": well_being_prompt}],
+                     model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
+                     temperature=0.7,
+                 )
+                 return response.choices[0].message.content
+             except Exception as e:
+                 return f"Error generating well-being tips: {e}"
+
+         # Prepare the prompt with more detailed datetime information
+         task_details = "\n".join([
+             f"- {task['description']} (Priority: {task['priority_score']:.2f}, Complexity: {task['complexity']}, Due: {task['due_date_time'].strftime('%B %d, %I:%M %p')})"
+             for task in filtered_tasks
+         ])
+
+         prompt = f"""
+         The user is feeling {emotion}.
+         They need a structured productivity plan starting from {selected_datetime.strftime('%B %d, %I:%M %p')}, not the current time.
+
+         Their prioritized tasks (due on or after the selected time), sorted by priority score:
+         {task_details}
+
+         Please provide:
+         1. A detailed schedule with specific times for each task
+         2. Strategic breaks based on task complexity and emotional state
+         3. Wellness activities that complement their current emotion
+         4. Tips for managing tasks effectively given their emotional state
+         5. Suggestions for handling high-priority tasks first while maintaining well-being
+         """
+
+         try:
+             response = client.chat.completions.create(
+                 messages=[{"role": "user", "content": prompt}],
+                 model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
+                 temperature=0.7,
+             )
+             return response.choices[0].message.content
+         except Exception as e:
+             return f"Error generating AI plan: {e}"
+
+
+     # Layout with improved spacing
+     col1, col2 = st.columns([1, 1], gap="medium")
+
+     with col1:
+         st.markdown('<h3>🌟 Mood Analysis</h3>', unsafe_allow_html=True)
+         emotion_sentence = st.text_area(
+             "Describe how you're feeling today:",
+             value="",
+             height=150,
+             help="Your emotional state helps us prioritize tasks more effectively"
+         )
+
+         if emotion_sentence:
+             emotion_score, emotion_label = predict_emotion(emotion_sentence)
+             st.session_state.overall_emotion = emotion_score
+             st.session_state.overall_emotion_label = emotion_label
+
+             st.markdown(f'<div class="emotion-badge">Detected Emotion: {emotion_label}</div>', unsafe_allow_html=True)
+
+             # Emotion-based task reprioritization
+             for task in st.session_state.tasks:
+                 task["priority_score"] = calculate_priority_score(
+                     task["predicted_intent_score"],
+                     emotion_score,
+                     emotion_label,
+                     task["time_remaining"],
+                     task["complexity"],
+                     get_emotion_category(emotion_label)
+                 )
+         st.markdown('</div>', unsafe_allow_html=True)
+
+     with col2:
+         st.markdown('<h3>📅 Add New Task</h3>', unsafe_allow_html=True)
+         with st.form("task_form", clear_on_submit=True):
+             task_description = st.text_input("Task Description", help="Be specific about what needs to be done")
+             col_date, col_time = st.columns(2)
+
+             with col_date:
+                 due_date = st.date_input("Due Date")
+
+             with col_time:
+                 due_time = st.time_input("Due Time")
+
+             complexity = st.slider(
+                 "Task Complexity (1-10)",
+                 1, 10, 5,
+                 help="Higher complexity may affect task priority"
+             )
+
+             submitted = st.form_submit_button("➕ Add Task")
+
+             if submitted and task_description and due_date and due_time:
+                 due_date_time = datetime.combine(due_date, due_time)
+                 time_remaining = due_date_time - datetime.now()
+                 predicted_intent_score = predict_intent(task_description)
+
+                 task = {
+                     "id": st.session_state.task_counter,  # Unique ID
+                     "description": task_description,
+                     "due_date_time": due_date_time,
+                     "time_remaining": time_remaining,
+                     "complexity": complexity,
+                     "predicted_intent_score": predicted_intent_score,
+                     "predicted_emotion": st.session_state.overall_emotion,
+                     "predicted_label_name": st.session_state.overall_emotion_label,
+                     "priority_score": calculate_priority_score(
+                         predicted_intent_score,
+                         st.session_state.overall_emotion,
+                         st.session_state.overall_emotion_label,
+                         time_remaining,
+                         complexity,
+                         get_emotion_category(st.session_state.overall_emotion_label)
+                     ),
+                     "completed": False
+                 }
+
+                 st.session_state.tasks.append(task)
+                 st.session_state.task_counter += 1  # Increment counter
+                 st.success("✅ Task Added Successfully!")
+         st.markdown('</div>', unsafe_allow_html=True)
+
+     # Task list with improved visualization
+     if st.session_state.tasks:
+         st.markdown('<h3>📌 Task Priority List</h3>', unsafe_allow_html=True)
+
+         # Sort tasks by priority
+         sorted_tasks = sorted(st.session_state.tasks, key=lambda x: x["priority_score"], reverse=True)
+
+         # Create task overview cards
+         st.markdown('<div class="task-overview">', unsafe_allow_html=True)
+         col1, col2 = st.columns(2)
+         with col1:
+             st.markdown(f'<div class="metric-card"><div class="metric-value">{len(sorted_tasks)}</div><div class="metric-label">Total Tasks</div></div>', unsafe_allow_html=True)
+         # with col2:
+         #     high_priority = len([t for t in sorted_tasks if t["priority_score"] > 0.7])
+         #     st.markdown(f'<div class="metric-card"><div class="metric-value">{high_priority}</div><div class="metric-label">High Priority</div></div>', unsafe_allow_html=True)
+         with col2:
+             today = datetime.now()
+             due_today = len([t for t in sorted_tasks if t["due_date_time"].date() == today.date()])
+             st.markdown(f'<div class="metric-card"><div class="metric-value">{due_today}</div><div class="metric-label">Due Today</div></div>', unsafe_allow_html=True)
+         st.markdown('</div>', unsafe_allow_html=True)
+
+         # Display tasks with priority-based styling
+         for idx, task in enumerate(sorted_tasks):
+             priority_class = "high-priority" if task["priority_score"] > 0.7 else "medium-priority"
+
+             # Create a single row for the task and its buttons
+             task_container = st.container()
+             with task_container:
+                 cols = st.columns([0.8, 0.1, 0.1])
+
+                 # Task content in first column
+                 with cols[0]:
+                     st.markdown(f"""
+                     <div class="priority-task {priority_class}">
+                         <div class="task-content">
+                             <div class="task-header">
+                                 <span class="task-title">{task["description"]}</span>
+                                 <span class="priority-score">Priority: {task["priority_score"]:.2f}</span>
+                             </div>
+                             <div class="task-details">
+                                 <span class="task-stat">Due: {task["due_date_time"].strftime("%d %b, %I:%M %p")}</span>
+                                 <span class="task-stat">Complexity: {task["complexity"]}</span>
+                             </div>
+                         </div>
+                     </div>
+                     """, unsafe_allow_html=True)
+
+                 # Edit button (editing_task_id is initialized once in session state,
+                 # so the selection survives the rerun triggered by the click)
+                 with cols[1]:
+                     if st.button("✏️", key=f"edit_{idx}", help="Edit task"):
+                         st.session_state.editing_task_id = idx
+
+                 # Delete button (remove by unique id, since sorted_tasks order can
+                 # differ from the order of st.session_state.tasks)
+                 with cols[2]:
+                     if st.button("🗑️", key=f"delete_{idx}", help="Delete task"):
+                         st.session_state.tasks = [t for t in st.session_state.tasks if t["id"] != task["id"]]
+                         st.success("Task deleted!")
+                         st.rerun()
+
+             # Show edit form below the task if it is being edited
+             if st.session_state.editing_task_id == idx:
+                 with st.form(key=f"edit_form_{idx}"):
+                     col1, col2 = st.columns(2)
+                     with col1:
+                         new_description = st.text_input("Description", value=task["description"])
+                         new_complexity = st.slider("Complexity", 1, 10, value=task["complexity"])
+                     with col2:
+                         new_due_date = st.date_input("Due Date", value=task["due_date_time"].date())
+                         new_due_time = st.time_input("Due Time", value=task["due_date_time"].time())
+
+                     col1, col2 = st.columns(2)
+                     with col1:
+                         if st.form_submit_button("💾 Save"):
+                             # Update task
+                             task["description"] = new_description
+                             task["due_date_time"] = datetime.combine(new_due_date, new_due_time)
+                             task["time_remaining"] = task["due_date_time"] - datetime.now()
+                             task["complexity"] = new_complexity
+
+                             # Recalculate priority
+                             task["priority_score"] = calculate_priority_score(
+                                 task["predicted_intent_score"],
+                                 task["predicted_emotion"],
+                                 task["predicted_label_name"],
+                                 task["time_remaining"],
+                                 task["complexity"],
+                                 get_emotion_category(task["predicted_label_name"])
+                             )
+                             st.session_state.editing_task_id = None
+                             st.success("Task updated!")
+                             st.rerun()
+
+                     with col2:
+                         if st.form_submit_button("❌ Cancel"):
+                             st.session_state.editing_task_id = None
+                             st.rerun()
+
+     # AI plan section
+     if st.session_state.tasks:
+         st.markdown('<div class="custom-card">', unsafe_allow_html=True)
+         st.markdown('<h3>⏰ AI Task Planning</h3>', unsafe_allow_html=True)
+
+         col_date, col_time = st.columns(2)
+
+         with col_date:
+             plan_date = st.date_input("Select Plan Date", datetime.now().date())
+
+         with col_time:
+             plan_time = st.time_input("Select Plan Start Time", datetime.now().time())
+
+         selected_datetime = datetime.combine(plan_date, plan_time)
+
+         if st.button("📅 Generate AI Plan"):
+             suggestion = get_llama_suggestion(
+                 st.session_state.overall_emotion_label,
+                 st.session_state.tasks,
+                 selected_datetime  # Pass the full datetime object
+             )
+             st.markdown(f'<div class="info-box">{suggestion}</div>', unsafe_allow_html=True)
+         st.markdown('</div>', unsafe_allow_html=True)
+
task_css.py ADDED
@@ -0,0 +1,458 @@
+ def get_custom_css():
+     return """
+     <style>
+     :root {
+         /* Refined Color Palette */
+         --primary-blue: #3B82F6;      /* Vibrant Blue */
+         --primary-dark: #1E40AF;      /* Deeper Blue */
+         --accent-teal: #0EA5E9;       /* Bright Teal */
+         --background-light: #F9FAFB;  /* Soft White */
+         --text-dark: #1E293B;         /* Deep Navy */
+         --text-medium: #475569;       /* Medium Slate */
+         --accent-orange: #F97316;     /* Warm Orange */
+         --success-green: #10B981;     /* Emerald Green */
+         --warning-yellow: #FBBF24;    /* Amber Yellow */
+         --error-red: #EF4444;         /* Cherry Red */
+
+         /* Refined Gradients */
+         --gradient-primary: linear-gradient(135deg, var(--primary-blue), var(--primary-dark));
+         --gradient-accent: linear-gradient(135deg, var(--accent-teal), #38BDF8);
+         --gradient-warm: linear-gradient(135deg, var(--accent-orange), #FB923C);
+     }
+
+     /* Global Reset with Professional Typography */
+     body, .stApp {
+         font-family: 'Inter', system-ui, -apple-system, sans-serif;
+         background-color: var(--background-light) !important;
+         color: var(--text-dark);
+         line-height: 1.6;
+         letter-spacing: -0.011em;
+     }
+
+     /* App Container with Refined Depth */
+     [data-testid="stAppViewContainer"] {
+         background-color: var(--background-light) !important;
+         max-width: 1100px;
+         margin: 0 auto;
+         padding: 2.5rem;
+         border-radius: 16px;
+         box-shadow:
+             0 20px 25px -5px rgba(59, 130, 246, 0.1),
+             0 10px 10px -5px rgba(59, 130, 246, 0.04),
+             inset 0 1px 0 rgba(255, 255, 255, 0.8);
+         transition: all 0.3s ease;
+     }
+
+     /* Professional Header */
+     .main-header {
+         display: flex;
+         align-items: center;
+         justify-content: center;
+         margin-bottom: 2rem;
+         color: var(--primary-blue);
+         font-size: 2rem;
+         font-weight: 700;
+         letter-spacing: -0.03em;
+     }
+
+     .main-header::before {
+
+         margin-right: 15px;
+         font-size: 2.2rem;
+         transition: transform 0.3s ease;
+     }
+
+     .main-header:hover::before {
+         transform: scale(1.1) rotate(5deg);
+     }
+
+     /* Professional Card Sections */
+     .emotion-analysis, .task-input {
+         background-color: white;
+         border-radius: 12px;
+         padding: 1.8rem;
+         box-shadow:
+             0 4px 6px -1px rgba(59, 130, 246, 0.1),
+             0 2px 4px -1px rgba(59, 130, 246, 0.06);
+         margin-bottom: 1.5rem;
+         transition: all 0.2s ease;
+         border-top: 3px solid var(--primary-blue);
+     }
+
+     .emotion-analysis:hover, .task-input:hover {
+         transform: translateY(-3px);
+         box-shadow:
+             0 10px 15px -3px rgba(59, 130, 246, 0.1),
+             0 4px 6px -2px rgba(59, 130, 246, 0.05);
+     }
+
+     /* Section Headers */
+     .stMarkdown h3 {
+         color: var(--primary-blue);
+         font-weight: 600;
+         font-size: 1.3rem;
+         margin-bottom: 1rem;
+         letter-spacing: -0.01em;
+         border-bottom: 1px solid rgba(59, 130, 246, 0.2);
+         padding-bottom: 0.5rem;
+     }
+
+     /* Polished Input Elements */
+     .stTextArea textarea,
+     .stTextInput>div>div>input {
+         border: 1px solid rgba(59, 130, 246, 0.3) !important;
+         border-radius: 8px !important;
+         padding: 12px 14px !important;
+         background-color: white !important;
+         color: var(--text-dark) !important;
+         font-weight: 400;
+         transition: all 0.2s ease !important;
+         box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05) !important;
+     }
+
+     .stTextArea textarea:focus,
+     .stTextInput>div>div>input:focus {
+         border-color: var(--primary-blue) !important;
+         box-shadow:
+             0 0 0 3px rgba(59, 130, 246, 0.15) !important,
+             0 1px 2px rgba(0, 0, 0, 0.05) !important;
+         outline: none !important;
+     }
+
+     /* Contextual Badges */
+     .emotion-badge {
+         background: var(--gradient-accent);
+         color: white !important;
+         border-radius: 6px;
+         padding: 8px 12px;
+         font-weight: 600;
+         display: inline-block;
+         margin-top: 10px;
+         box-shadow: 0 2px 4px rgba(14, 165, 233, 0.2);
+     }
+
+     .warning-badge {
+         background: var(--gradient-warm);
+         color: white !important;
+         border-radius: 6px;
+         padding: 8px 12px;
+         font-weight: 600;
+         display: inline-block;
+         box-shadow: 0 2px 4px rgba(249, 115, 22, 0.2);
+     }
+
+     /* Professional Button */
+     .stButton>button {
+         background: var(--gradient-primary) !important;
+         color: white !important;
+         border: none !important;
+         border-radius: 8px !important;
+         padding: 10px 20px !important;
+         font-weight: 600;
+         font-size: 0.9rem;
+         letter-spacing: 0.02em;
+         transition: all 0.2s ease !important;
+         box-shadow:
+             0 4px 6px -1px rgba(59, 130, 246, 0.2),
+             0 2px 4px -1px rgba(59, 130, 246, 0.1);
+     }
+
+     .stButton>button:hover {
+         transform: translateY(-2px);
+         box-shadow:
+             0 6px 10px -1px rgba(59, 130, 246, 0.25),
+             0 4px 6px -1px rgba(59, 130, 246, 0.15);
+     }
+
+     .stButton>button:active {
+         transform: translateY(0);
+         box-shadow:
+             0 2px 4px -1px rgba(59, 130, 246, 0.2),
+             0 1px 2px -1px rgba(59, 130, 246, 0.1);
+     }
+
+     /* Improved Slider */
+     .stSlider {
+         margin-top: 12px;
+     }
+
+     .stSlider > div > div > div {
+         background-color: #CBD5E1 !important;
+         height: 6px !important;
+         border-radius: 3px !important;
+     }
+
+     .stSlider > div > div > div > div {
+         background: var(--primary-blue) !important;
+         box-shadow: 0 0 0 2px white, 0 0 0 3px rgba(59, 130, 246, 0.2) !important;
+         width: 18px !important;
+         height: 18px !important;
+         border-radius: 50% !important;
+         transition: transform 0.2s ease !important;
+     }
+
+     .stSlider > div > div > div > div:hover {
+         transform: scale(1.15) !important;
+     }
+
+     /* Progress Bar */
+     .stProgress > div > div > div {
+         background-color: var(--primary-blue) !important;
+         border-radius: 4px !important;
+     }
+
+     /* Select Boxes */
+     .stSelectbox label {
+         color: var(--text-medium) !important;
+         font-weight: 500 !important;
+     }
+
+     .stSelectbox > div > div > div {
+         background-color: white !important;
+         border: 1px solid rgba(59, 130, 246, 0.3) !important;
+         border-radius: 8px !important;
+         padding: 4px 8px !important;
+     }
+
+     /* Checkbox */
+     .stCheckbox label {
+         color: var(--text-medium) !important;
+         font-size: 0.95rem !important;
+     }
+
+     /* Tabs */
+     .stTabs [data-baseweb="tab-list"] {
+         gap: 2px;
+         background-color: rgba(59, 130, 246, 0.1) !important;
+         border-radius: 8px !important;
+         padding: 2px !important;
+     }
+
+     .stTabs [data-baseweb="tab"] {
+         background-color: transparent !important;
+         border-radius: 6px !important;
+         padding: 8px 16px !important;
+         border: none !important;
+         color: var(--text-medium) !important;
+         font-weight: 500 !important;
+         transition: all 0.2s ease !important;
+     }
+
+     .stTabs [aria-selected="true"] {
+         background-color: white !important;
+         color: var(--primary-blue) !important;
+         font-weight: 600 !important;
+         box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+     }
+
+     /* Info Boxes */
+     .info-box {
+         background-color: rgba(14, 165, 233, 0.1);
+         border-left: 3px solid var(--accent-teal);
+         border-radius: 6px;
+         padding: 15px;
+         margin: 15px 0;
+         color: var(--text-dark);
+     }
+
+     .success-box {
+         background-color: rgba(16, 185, 129, 0.1);
+         border-left: 3px solid var(--success-green);
+         border-radius: 6px;
+         padding: 15px;
+         margin: 15px 0;
+     }
+
+     .warning-box {
+         background-color: rgba(251, 191, 36, 0.1);
+         border-left: 3px solid var(--warning-yellow);
+         border-radius: 6px;
+         padding: 15px;
+         margin: 15px 0;
+     }
+
+     .error-box {
+         background-color: rgba(239, 68, 68, 0.1);
+         border-left: 3px solid var(--error-red);
+         border-radius: 6px;
+         padding: 15px;
+         margin: 15px 0;
+     }
+
+     /* Data Elements */
+     .metric-card {
+         background-color: white;
+         border-radius: 10px;
+         padding: 20px;
+         display: flex;
+         flex-direction: column;
+         align-items: center;
+         box-shadow: 0 2px 5px rgba(0, 0, 0, 0.05);
+         border-top: 3px solid var(--primary-blue);
+     }
+
+     .metric-value {
+         font-size: 2rem;
+         font-weight: 700;
+         color: var(--primary-blue);
+     }
+
+     .metric-label {
+         font-size: 0.9rem;
+         color: var(--text-medium);
+         margin-top: 5px;
+     }
+
+     /* Action Menu */
+     .action-menu {
+         position: relative;
+         display: inline-block;
+     }
+
+     .action-menu-content {
+         display: none;
+         position: absolute;
+         right: 0;
+         background-color: white;
+         min-width: 120px;
+         box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
+         z-index: 1;
+         border-radius: 8px;
+         overflow: hidden;
+     }
+
+     .action-menu-content a {
+         color: var(--text-dark);
+         padding: 12px 16px;
+         text-decoration: none;
+         display: block;
+         transition: background-color 0.2s ease;
+     }
+
+     .action-menu-content a:hover {
+         background-color: var(--primary-blue);
+         color: white;
+     }
+
+     .action-menu:hover .action-menu-content {
+         display: block;
+     }
+
+     .action-menu .three-dots {
+         cursor: pointer;
+         font-size: 1.5rem;
+         color: var(--text-medium);
+     }
+
+     /* Task Content Styling */
+     .task-content {
+         flex: 1;
+         padding-right: 20px;
+     }
+
+     .task-header {
+         display: flex;
+         justify-content: space-between;
+         align-items: center;
+         margin-bottom: 8px;
+     }
+
+     .task-title {
+         font-weight: 600;
+         color: var(--text-dark);
+         font-size: 1rem;
+     }
+
+     .priority-score {
+         background: var(--gradient-primary);
+         color: white;
+         padding: 4px 8px;
+         border-radius: 4px;
+         font-size: 0.85rem;
+         font-weight: 600;
+     }
+
+     .task-details {
+         display: flex;
+         gap: 16px;
+     }
+
+     .task-stat {
+         color: var(--text-medium);
+         font-size: 0.9rem;
+     }
+
+     /* Priority Task List */
+     .priority-task {
+         display: flex;
+         justify-content: space-between;
+         align-items: center;
+         padding: 15px;
+         border-radius: 8px;
+         margin-bottom: 8px;
+         background-color: white;
+         box-shadow: 0 2px 4px rgba(0,0,0,0.05);
+     }
+
+     .high-priority {
+         border-left: 4px solid var(--error-red);
+     }
+
+     .medium-priority {
+         border-left: 4px solid var(--warning-yellow);
+     }
+
+     /* Responsive Design */
+     @media (max-width: 768px) {
+         [data-testid="stAppViewContainer"] {
+             padding: 1.2rem;
+             border-radius: 12px;
+         }
+
+         .main-header {
+             font-size: 1.8rem;
+         }
+
+         .emotion-analysis, .task-input {
+             padding: 1.2rem;
+         }
+
+         .metric-value {
+             font-size: 1.6rem;
+         }
+     }
+
+     /* Loading Spinner */
+     .loading-container {
+         display: flex;
+         flex-direction: column;
+         align-items: center;
+         justify-content: center;
+         height: 60vh;
+     }
+
+     .loading-spinner {
+         width: 50px;
+         height: 50px;
+         border: 5px solid #f3f3f3;
+         border-top: 5px solid var(--primary-blue);
+         border-radius: 50%;
+         animation: spin 1s linear infinite;
+         margin-bottom: 20px;
+     }
+
+     @keyframes spin {
+         0% { transform: rotate(0deg); }
+         100% { transform: rotate(360deg); }
+     }
+
+     /* Status Messages */
+     .status-message {
+         background: white;
+         padding: 10px 15px;
+         border-radius: 8px;
+         margin: 5px 0;
+         box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+     }
+     </style>
+     """
task_prioritizer.py ADDED
@@ -0,0 +1,105 @@
+ import torch
+ from transformers import BertTokenizer, BertForSequenceClassification, DistilBertForSequenceClassification
+ from datetime import datetime
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load the intent classifier model and tokenizer
+ num_intent_labels = 151  # Set the correct number of labels for the intent classifier
+ intent_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_intent_labels)
+ intent_model.load_state_dict(torch.load("intent_classifier.pth", map_location=device))
+ intent_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ intent_model.to(device)
+ intent_model.eval()
+
+ # Load the emotions model and tokenizer
+ emotions_model = DistilBertForSequenceClassification.from_pretrained("./saved_model")
+ emotions_tokenizer = BertTokenizer.from_pretrained("./saved_model")
+ emotions_model.to(device)
+ emotions_model.eval()
+
+ # Define the label names for emotions
+ emotion_label_names = ["admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity", "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear", "gratitude", "grief", "joy", "love", "nervousness", "optimism", "pride", "realization", "relief", "remorse", "sadness", "surprise", "neutral"]
+
+ def predict_intent(sentence):
+     inputs = intent_tokenizer(sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128)
+     inputs = {key: val.to(device) for key, val in inputs.items()}
+
+     with torch.no_grad():
+         outputs = intent_model(**inputs)
+         predicted_class = torch.argmax(outputs.logits, dim=1).cpu().numpy()[0]
+
+     return predicted_class
+
+ def predict_emotion(sentence):
+     inputs = emotions_tokenizer(sentence, return_tensors="pt", padding="max_length", truncation=True, max_length=128)
+     inputs = {key: val.to(device) for key, val in inputs.items() if key != "token_type_ids"}
+
+     with torch.no_grad():
+         outputs = emotions_model(**inputs)
+         predicted_class = torch.argmax(outputs.logits, dim=1).cpu().numpy()[0]
+
+     return predicted_class, emotion_label_names[predicted_class]
+
+ def calculate_priority_score(intent, emotion, time_remaining):
+     # Example priority score calculation: note that the raw class indices are
+     # used as magnitudes here, which is a rough prototype heuristic
+     intent_weight = 0.4
+     emotion_weight = 0.3
+     time_weight = 0.3
+
+     # Normalize time_remaining to a score between 0 and 1
+     time_score = max(0, min(1, 1 - (time_remaining.total_seconds() / (24 * 3600))))
+
+     # Calculate priority score
+     priority_score = (intent * intent_weight) + (emotion * emotion_weight) + (time_score * time_weight)
+     return priority_score
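+ # Worked example: intent class 42, emotion class 25 ("sadness") and 6 hours
+ # remaining give time_score = 0.75 and 42*0.4 + 25*0.3 + 0.75*0.3 ≈ 24.53.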
+
+ def prioritize_task(task_description, due_date_time, predicted_emotion, predicted_label_name):
+     predicted_intent = predict_intent(task_description)
+
+     # Calculate time remaining until the due date and time
+     due_date_time = datetime.strptime(due_date_time, "%Y-%m-%d %H:%M:%S")
+     time_remaining = due_date_time - datetime.now()
+
+     priority_score = calculate_priority_score(predicted_intent, predicted_emotion, time_remaining)
+
+     return {
+         "description": task_description,
+         "due_date_time": due_date_time,
+         "time_remaining": time_remaining,
+         "predicted_intent": predicted_intent,
+         "predicted_emotion": predicted_emotion,
+         "predicted_label_name": predicted_label_name,
+         "priority_score": priority_score
+     }
+
+ # Example tasks
+ tasks = [
+     {"description": "Finish the report by tomorrow.", "due_date_time": "2025-03-02 09:00:00"},
+     {"description": "meeting", "due_date_time": "2025-03-02 12:00:00"},
+     {"description": "listen to music.", "due_date_time": "2025-03-02 15:00:00"},
+     {"description": "daily linkedin queens game.", "due_date_time": "2025-03-02 18:00:00"},
+     {"description": "prepare ppt", "due_date_time": "2025-03-02 21:00:00"}
+ ]
+
+ # Overall emotion sentence
+ emotion_sentence = "I am feeling very tired and stressed now"
+ predicted_emotion, predicted_label_name = predict_emotion(emotion_sentence)
+
+ # Prioritize tasks
+ prioritized_tasks = []
+ for task in tasks:
+     prioritized_tasks.append(prioritize_task(task["description"], task["due_date_time"], predicted_emotion, predicted_label_name))
+
+ # Reorder tasks based on priority score (descending order)
+ prioritized_tasks.sort(key=lambda x: x["priority_score"], reverse=True)
+
+ # Print prioritized tasks
+ for task in prioritized_tasks:
+     print(f"Task Description: '{task['description']}'")
+     print(f"Due Date and Time: {task['due_date_time']}")
+     print(f"Time Remaining: {task['time_remaining']}")
+     print(f"Predicted Intent: {task['predicted_intent']}")
+     print(f"Predicted Emotion: {task['predicted_emotion']} ({task['predicted_label_name']})")
+     print(f"Priority Score: {task['priority_score']:.4f}")
+     print()
task_ui.py ADDED
@@ -0,0 +1,326 @@
1
+ def get_custom_css():
2
+ return """
3
+ <style>
4
+ :root {
5
+ /* Refined Color Palette */
6
+ --primary-blue: #3B82F6; /* Vibrant Blue */
7
+ --primary-dark: #1E40AF; /* Deeper Blue */
8
+ --accent-teal: #0EA5E9; /* Bright Teal */
9
+ --background-light: #F9FAFB; /* Soft White */
10
+ --text-dark: #1E293B; /* Deep Navy */
11
+ --text-medium: #475569; /* Medium Slate */
12
+ --accent-orange: #F97316; /* Warm Orange */
13
+ --success-green: #10B981; /* Emerald Green */
14
+ --warning-yellow: #FBBF24; /* Amber Yellow */
15
+ --error-red: #EF4444; /* Cherry Red */
16
+
17
+ /* Refined Gradients */
18
+ --gradient-primary: linear-gradient(135deg, var(--primary-blue), var(--primary-dark));
19
+ --gradient-accent: linear-gradient(135deg, var(--accent-teal), #38BDF8);
20
+ --gradient-warm: linear-gradient(135deg, var(--accent-orange), #FB923C);
21
+ }
22
+
23
+ /* Global Reset with Professional Typography */
24
+ body, .stApp {
25
+ font-family: 'Inter', system-ui, -apple-system, sans-serif;
26
+ background-color: var(--background-light) !important;
27
+ color: var(--text-dark);
28
+ line-height: 1.6;
29
+ letter-spacing: -0.011em;
30
+ }
31
+
32
+ /* App Container with Refined Depth */
33
+ [data-testid="stAppViewContainer"] {
34
+ background-color: var(--background-light) !important;
35
+ max-width: 1100px;
36
+ margin: 0 auto;
37
+ padding: 2.5rem;
38
+ border-radius: 16px;
39
+ box-shadow:
40
+ 0 20px 25px -5px rgba(59, 130, 246, 0.1),
41
+ 0 10px 10px -5px rgba(59, 130, 246, 0.04),
42
+ inset 0 1px 0 rgba(255, 255, 255, 0.8);
43
+ transition: all 0.3s ease;
44
+ }
45
+
46
+ /* Professional Header */
47
+ .main-title {
48
+ display: flex;
49
+ align-items: center;
50
+ justify-content: center;
51
+ margin-bottom: 2rem;
52
+ color: var(--primary-blue);
53
+ font-size: 2.5rem;
54
+ font-weight: 700;
55
+ letter-spacing: -0.03em;
56
+ }
57
+
58
+ .main-title::before {
59
+
60
+ margin-right: 15px;
61
+ font-size: 2.2rem;
62
+ transition: transform 0.3s ease;
63
+ }
64
+
65
+ .main-title:hover::before {
66
+ transform: scale(1.1) rotate(5deg);
67
+ }
68
+
69
+ /* Professional Card Sections */
70
+ .emotion-analysis, .task-input {
71
+ background-color: white;
72
+ border-radius: 12px;
73
+ padding: 1.8rem;
74
+ box-shadow:
75
+ 0 4px 6px -1px rgba(59, 130, 246, 0.1),
76
+ 0 2px 4px -1px rgba(59, 130, 246, 0.06);
77
+ margin-bottom: 1.5rem;
78
+ transition: all 0.2s ease;
79
+ border-top: 3px solid var(--primary-blue);
80
+ }
81
+
82
+ .emotion-analysis:hover, .task-input:hover {
83
+ transform: translateY(-3px);
84
+ box-shadow:
85
+ 0 10px 15px -3px rgba(59, 130, 246, 0.1),
86
+ 0 4px 6px -2px rgba(59, 130, 246, 0.05);
87
+ }
88
+
89
+ /* Section Headers */
90
+ .stMarkdown h3 {
91
+ color: var(--primary-blue);
92
+ font-weight: 600;
93
+ font-size: 1.3rem;
94
+ margin-bottom: 1rem;
95
+ letter-spacing: -0.01em;
96
+ border-bottom: 1px solid rgba(59, 130, 246, 0.2);
97
+ padding-bottom: 0.5rem;
98
+ }
99
+
100
+ /* Polished Input Elements */
101
+ .stTextArea textarea,
102
+ .stTextInput>div>div>input {
103
+ border: 1px solid rgba(59, 130, 246, 0.3) !important;
104
+ border-radius: 8px !important;
105
+ padding: 12px 14px !important;
106
+ background-color: white !important;
107
+ color: var(--text-dark) !important;
108
+ font-weight: 400;
109
+ transition: all 0.2s ease !important;
110
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05) !important;
111
+ }
112
+
113
+ .stTextArea textarea:focus,
114
+ .stTextInput>div>div>input:focus {
115
+ border-color: var(--primary-blue) !important;
116
+ box-shadow:
117
+ 0 0 0 3px rgba(59, 130, 246, 0.15) !important,
118
+ 0 1px 2px rgba(0, 0, 0, 0.05) !important;
119
+ outline: none !important;
120
+ }
121
+
122
+ /* Contextual Badges */
123
+ .emotion-badge {
124
+ background: var(--gradient-accent);
125
+ color: white !important;
126
+ border-radius: 6px;
127
+ padding: 8px 12px;
128
+ font-weight: 600;
129
+ display: inline-block;
130
+ margin-top: 10px;
131
+ box-shadow: 0 2px 4px rgba(14, 165, 233, 0.2);
132
+ }
133
+
134
+ .warning-badge {
135
+ background: var(--gradient-warm);
136
+ color: white !important;
137
+ border-radius: 6px;
138
+ padding: 8px 12px;
139
+ font-weight: 600;
140
+ display: inline-block;
141
+ box-shadow: 0 2px 4px rgba(249, 115, 22, 0.2);
142
+ }
143
+
144
+ /* Professional Button */
145
+ .stButton>button {
146
+ background: var(--gradient-primary) !important;
147
+ color: white !important;
148
+ border: none !important;
149
+     border-radius: 8px !important;
+     padding: 10px 20px !important;
+     font-weight: 600;
+     font-size: 0.9rem;
+     letter-spacing: 0.02em;
+     transition: all 0.2s ease !important;
+     box-shadow:
+         0 4px 6px -1px rgba(59, 130, 246, 0.2),
+         0 2px 4px -1px rgba(59, 130, 246, 0.1);
+ }
+
+ .stButton>button:hover {
+     transform: translateY(-2px);
+     box-shadow:
+         0 6px 10px -1px rgba(59, 130, 246, 0.25),
+         0 4px 6px -1px rgba(59, 130, 246, 0.15);
+ }
+
+ .stButton>button:active {
+     transform: translateY(0);
+     box-shadow:
+         0 2px 4px -1px rgba(59, 130, 246, 0.2),
+         0 1px 2px -1px rgba(59, 130, 246, 0.1);
+ }
+
+ /* Improved Slider */
+ .stSlider {
+     margin-top: 12px;
+ }
+
+ .stSlider > div > div > div {
+     background-color: #CBD5E1 !important;
+     height: 6px !important;
+     border-radius: 3px !important;
+ }
+
+ .stSlider > div > div > div > div {
+     background: var(--primary-blue) !important;
+     box-shadow: 0 0 0 2px white, 0 0 0 3px rgba(59, 130, 246, 0.2) !important;
+     width: 18px !important;
+     height: 18px !important;
+     border-radius: 50% !important;
+     transition: transform 0.2s ease !important;
+ }
+
+ .stSlider > div > div > div > div:hover {
+     transform: scale(1.15) !important;
+ }
+
+ /* Progress Bar */
+ .stProgress > div > div > div {
+     background-color: var(--primary-blue) !important;
+     border-radius: 4px !important;
+ }
+
+ /* Select Boxes */
+ .stSelectbox label {
+     color: var(--text-medium) !important;
+     font-weight: 500 !important;
+ }
+
+ .stSelectbox > div > div > div {
+     background-color: white !important;
+     border: 1px solid rgba(59, 130, 246, 0.3) !important;
+     border-radius: 8px !important;
+     padding: 4px 8px !important;
+ }
+
+ /* Checkbox */
+ .stCheckbox label {
+     color: var(--text-medium) !important;
+     font-size: 0.95rem !important;
+ }
+
+ /* Tabs */
+ .stTabs [data-baseweb="tab-list"] {
+     gap: 2px;
+     background-color: rgba(59, 130, 246, 0.1) !important;
+     border-radius: 8px !important;
+     padding: 2px !important;
+ }
+
+ .stTabs [data-baseweb="tab"] {
+     background-color: transparent !important;
+     border-radius: 6px !important;
+     padding: 8px 16px !important;
+     border: none !important;
+     color: var(--text-medium) !important;
+     font-weight: 500 !important;
+     transition: all 0.2s ease !important;
+ }
+
+ .stTabs [aria-selected="true"] {
+     background-color: white !important;
+     color: var(--primary-blue) !important;
+     font-weight: 600 !important;
+     box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+ }
+
+ /* Info Boxes */
+ .info-box {
+     background-color: rgba(14, 165, 233, 0.1);
+     border-left: 3px solid var(--accent-teal);
+     border-radius: 6px;
+     padding: 15px;
+     margin: 15px 0;
+     color: var(--text-dark);
+ }
+
+ .success-box {
+     background-color: rgba(16, 185, 129, 0.1);
+     border-left: 3px solid var(--success-green);
+     border-radius: 6px;
+     padding: 15px;
+     margin: 15px 0;
+ }
+
+ .warning-box {
+     background-color: rgba(251, 191, 36, 0.1);
+     border-left: 3px solid var(--warning-yellow);
+     border-radius: 6px;
+     padding: 15px;
+     margin: 15px 0;
+ }
+
+ .error-box {
+     background-color: rgba(239, 68, 68, 0.1);
+     border-left: 3px solid var(--error-red);
+     border-radius: 6px;
+     padding: 15px;
+     margin: 15px 0;
+ }
+
+ /* Data Elements */
+ .metric-card {
+     background-color: white;
+     border-radius: 10px;
+     padding: 20px;
+     display: flex;
+     flex-direction: column;
+     align-items: center;
+     box-shadow: 0 2px 5px rgba(0, 0, 0, 0.05);
+     border-top: 3px solid var(--primary-blue);
+ }
+
+ .metric-value {
+     font-size: 2rem;
+     font-weight: 700;
+     color: var(--primary-blue);
+ }
+
+ .metric-label {
+     font-size: 0.9rem;
+     color: var(--text-medium);
+     margin-top: 5px;
+ }
+
+ /* Responsive Design */
+ @media (max-width: 768px) {
+     [data-testid="stAppViewContainer"] {
+         padding: 1.2rem;
+         border-radius: 12px;
+     }
+
+     .main-title {
+         font-size: 1.8rem;
+     }
+
+     .emotion-analysis, .task-input {
+         padding: 1.2rem;
+     }
+
+     .metric-value {
+         font-size: 1.6rem;
+     }
+ }
+ </style>
+ """
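The trailing `"""` shows this stylesheet is a Python triple-quoted string, and selectors like `.stButton>button` and `[data-testid="stAppViewContainer"]` target Streamlit's generated widgets. A minimal sketch of how such a string is typically injected into the app; the variable name `CUSTOM_CSS` is an assumption for illustration, not taken from this commit:

import streamlit as st

# Hypothetical name for the "<style>...</style>" string ending above;
# a short stand-in body is used here so the sketch runs on its own.
CUSTOM_CSS = """
<style>
    .stButton>button { border-radius: 8px !important; }
</style>
"""

# Rendering the raw HTML is the usual way to restyle Streamlit widgets;
# unsafe_allow_html=True lets the <style> tag through to the page.
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)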
test_results.csv ADDED
@@ -0,0 +1,5428 @@
+ true_labels,predicted_labels
+ 25,24
+ 0,0
+ 13,0
+ 15,15
+ 27,27
+ 15,15
+ 15,15
+ 15,0
+ 24,24
+ 25,9
+ 3,27
+ 1,1
+ 8,8
+ 0,0
+ 14,14
+ 10,10
+ 14,11
+ 25,27
+ 1,1
+ 15,15
[… remaining 5,407 rows of integer true/predicted label pairs trimmed; the hunk header above records 5,428 lines in total]
4062
+ 9,15
4063
+ 27,10
4064
+ 8,18
4065
+ 2,2
4066
+ 1,27
4067
+ 27,27
4068
+ 10,27
4069
+ 7,27
4070
+ 10,27
4071
+ 3,7
4072
+ 25,25
4073
+ 18,18
4074
+ 27,27
4075
+ 7,7
4076
+ 10,10
4077
+ 4,5
4078
+ 4,4
4079
+ 7,27
4080
+ 27,4
4081
+ 27,27
4082
+ 27,3
4083
+ 27,27
4084
+ 0,0
4085
+ 0,0
4086
+ 27,27
4087
+ 9,6
4088
+ 4,0
4089
+ 26,26
4090
+ 16,27
4091
+ 10,26
4092
+ 17,17
4093
+ 27,27
4094
+ 0,1
4095
+ 17,17
4096
+ 0,0
4097
+ 26,26
4098
+ 27,27
4099
+ 27,27
4100
+ 2,27
4101
+ 5,17
4102
+ 1,1
4103
+ 2,3
4104
+ 9,15
4105
+ 0,0
4106
+ 18,27
4107
+ 5,27
4108
+ 5,20
4109
+ 24,24
4110
+ 0,0
4111
+ 27,27
4112
+ 14,14
4113
+ 6,6
4114
+ 7,7
4115
+ 17,17
4116
+ 27,27
4117
+ 9,27
4118
+ 18,18
4119
+ 1,1
4120
+ 10,10
4121
+ 1,1
4122
+ 27,27
4123
+ 2,27
4124
+ 6,7
4125
+ 27,17
4126
+ 27,27
4127
+ 8,27
4128
+ 27,3
4129
+ 4,4
4130
+ 17,17
4131
+ 4,26
4132
+ 7,27
4133
+ 3,27
4134
+ 1,1
4135
+ 7,7
4136
+ 10,10
4137
+ 4,4
4138
+ 27,2
4139
+ 27,27
4140
+ 10,10
4141
+ 3,4
4142
+ 1,1
4143
+ 18,18
4144
+ 6,3
4145
+ 27,27
4146
+ 3,18
4147
+ 27,27
4148
+ 10,10
4149
+ 24,24
4150
+ 4,27
4151
+ 24,24
4152
+ 27,7
4153
+ 24,24
4154
+ 25,25
4155
+ 27,5
4156
+ 8,17
4157
+ 18,18
4158
+ 27,25
4159
+ 6,7
4160
+ 9,27
4161
+ 10,24
4162
+ 2,10
4163
+ 27,27
4164
+ 27,2
4165
+ 27,27
4166
+ 27,27
4167
+ 27,27
4168
+ 10,27
4169
+ 24,24
4170
+ 2,11
4171
+ 27,27
4172
+ 7,7
4173
+ 27,27
4174
+ 1,1
4175
+ 27,27
4176
+ 27,27
4177
+ 2,7
4178
+ 20,15
4179
+ 0,0
4180
+ 27,27
4181
+ 0,3
4182
+ 2,27
4183
+ 3,10
4184
+ 0,13
4185
+ 25,25
4186
+ 13,13
4187
+ 27,27
4188
+ 27,27
4189
+ 22,26
4190
+ 12,26
4191
+ 1,1
4192
+ 26,7
4193
+ 27,27
4194
+ 0,17
4195
+ 27,27
4196
+ 27,27
4197
+ 15,15
4198
+ 0,0
4199
+ 17,17
4200
+ 27,1
4201
+ 2,7
4202
+ 1,1
4203
+ 21,18
4204
+ 7,7
4205
+ 27,27
4206
+ 4,4
4207
+ 27,27
4208
+ 27,27
4209
+ 7,7
4210
+ 27,27
4211
+ 27,0
4212
+ 27,0
4213
+ 27,27
4214
+ 14,14
4215
+ 9,3
4216
+ 18,18
4217
+ 15,15
4218
+ 4,4
4219
+ 27,27
4220
+ 15,15
4221
+ 27,27
4222
+ 10,0
4223
+ 7,7
4224
+ 20,20
4225
+ 9,27
4226
+ 4,27
4227
+ 13,27
4228
+ 15,15
4229
+ 27,27
4230
+ 6,27
4231
+ 3,27
4232
+ 11,2
4233
+ 27,1
4234
+ 0,0
4235
+ 9,9
4236
+ 27,10
4237
+ 18,18
4238
+ 1,1
4239
+ 4,27
4240
+ 27,27
4241
+ 7,7
4242
+ 2,6
4243
+ 27,27
4244
+ 27,0
4245
+ 15,15
4246
+ 7,27
4247
+ 20,20
4248
+ 27,27
4249
+ 27,27
4250
+ 27,27
4251
+ 27,27
4252
+ 10,27
4253
+ 10,10
4254
+ 27,27
4255
+ 25,25
4256
+ 27,27
4257
+ 0,18
4258
+ 27,2
4259
+ 13,13
4260
+ 15,15
4261
+ 6,7
4262
+ 1,2
4263
+ 20,20
4264
+ 9,0
4265
+ 27,27
4266
+ 10,3
4267
+ 4,0
4268
+ 15,15
4269
+ 15,10
4270
+ 7,7
4271
+ 15,15
4272
+ 9,3
4273
+ 0,0
4274
+ 0,0
4275
+ 0,27
4276
+ 27,27
4277
+ 27,27
4278
+ 17,27
4279
+ 13,27
4280
+ 2,11
4281
+ 27,27
4282
+ 27,27
4283
+ 0,0
4284
+ 9,27
4285
+ 7,7
4286
+ 27,27
4287
+ 4,4
4288
+ 27,27
4289
+ 1,1
4290
+ 27,27
4291
+ 4,1
4292
+ 25,27
4293
+ 18,18
4294
+ 20,20
4295
+ 27,27
4296
+ 27,27
4297
+ 27,27
4298
+ 7,27
4299
+ 24,24
4300
+ 0,0
4301
+ 9,9
4302
+ 17,27
4303
+ 17,17
4304
+ 4,27
4305
+ 3,27
4306
+ 22,9
4307
+ 11,11
4308
+ 0,0
4309
+ 27,27
4310
+ 7,7
4311
+ 27,27
4312
+ 7,7
4313
+ 5,5
4314
+ 9,3
4315
+ 27,10
4316
+ 25,27
4317
+ 6,6
4318
+ 27,27
4319
+ 27,27
4320
+ 27,27
4321
+ 5,20
4322
+ 27,27
4323
+ 8,8
4324
+ 27,3
4325
+ 4,4
4326
+ 27,27
4327
+ 27,27
4328
+ 10,11
4329
+ 0,0
4330
+ 10,27
4331
+ 18,18
4332
+ 27,26
4333
+ 3,7
4334
+ 18,7
4335
+ 4,27
4336
+ 0,0
4337
+ 6,7
4338
+ 6,6
4339
+ 5,27
4340
+ 0,18
4341
+ 27,27
4342
+ 27,27
4343
+ 24,24
4344
+ 2,2
4345
+ 27,27
4346
+ 7,7
4347
+ 27,27
4348
+ 27,1
4349
+ 10,20
4350
+ 27,7
4351
+ 1,1
4352
+ 0,0
4353
+ 0,0
4354
+ 27,27
4355
+ 3,27
4356
+ 6,6
4357
+ 5,4
4358
+ 0,0
4359
+ 1,1
4360
+ 27,27
4361
+ 3,27
4362
+ 5,5
4363
+ 27,27
4364
+ 26,26
4365
+ 0,0
4366
+ 7,7
4367
+ 10,18
4368
+ 19,27
4369
+ 15,15
4370
+ 6,6
4371
+ 3,3
4372
+ 0,0
4373
+ 2,1
4374
+ 1,1
4375
+ 17,0
4376
+ 8,20
4377
+ 10,10
4378
+ 0,0
4379
+ 4,0
4380
+ 3,18
4381
+ 6,7
4382
+ 3,2
4383
+ 3,27
4384
+ 27,27
4385
+ 4,27
4386
+ 7,27
4387
+ 3,27
4388
+ 11,3
4389
+ 27,27
4390
+ 18,18
4391
+ 18,18
4392
+ 4,4
4393
+ 27,10
4394
+ 27,27
4395
+ 25,9
4396
+ 25,25
4397
+ 27,4
4398
+ 3,3
4399
+ 9,9
4400
+ 1,27
4401
+ 4,24
4402
+ 4,4
4403
+ 3,27
4404
+ 27,27
4405
+ 4,27
4406
+ 18,18
4407
+ 4,4
4408
+ 27,27
4409
+ 0,18
4410
+ 27,27
4411
+ 4,4
4412
+ 5,4
4413
+ 26,27
4414
+ 1,1
4415
+ 1,1
4416
+ 27,27
4417
+ 8,18
4418
+ 0,0
4419
+ 27,1
4420
+ 17,0
4421
+ 20,8
4422
+ 7,27
4423
+ 27,2
4424
+ 27,27
4425
+ 14,14
4426
+ 27,27
4427
+ 5,5
4428
+ 3,2
4429
+ 27,2
4430
+ 15,15
4431
+ 4,4
4432
+ 17,7
4433
+ 4,1
4434
+ 14,14
4435
+ 27,27
4436
+ 2,27
4437
+ 17,18
4438
+ 1,1
4439
+ 27,27
4440
+ 4,27
4441
+ 27,7
4442
+ 0,0
4443
+ 27,2
4444
+ 27,27
4445
+ 25,25
4446
+ 27,14
4447
+ 27,0
4448
+ 4,27
4449
+ 27,27
4450
+ 1,1
4451
+ 11,11
4452
+ 27,27
4453
+ 27,9
4454
+ 6,6
4455
+ 9,27
4456
+ 18,18
4457
+ 25,27
4458
+ 13,13
4459
+ 27,7
4460
+ 27,26
4461
+ 0,0
4462
+ 17,17
4463
+ 5,5
4464
+ 4,4
4465
+ 27,10
4466
+ 22,27
4467
+ 17,0
4468
+ 1,1
4469
+ 27,0
4470
+ 15,15
4471
+ 15,15
4472
+ 27,27
4473
+ 11,4
4474
+ 27,27
4475
+ 15,15
4476
+ 27,17
4477
+ 8,13
4478
+ 5,15
4479
+ 0,0
4480
+ 27,27
4481
+ 0,0
4482
+ 11,1
4483
+ 15,15
4484
+ 27,27
4485
+ 27,27
4486
+ 9,10
4487
+ 6,6
4488
+ 27,27
4489
+ 14,14
4490
+ 15,0
4491
+ 4,4
4492
+ 3,3
4493
+ 25,25
4494
+ 10,0
4495
+ 27,27
4496
+ 27,27
4497
+ 27,27
4498
+ 27,27
4499
+ 26,27
4500
+ 27,27
4501
+ 9,8
4502
+ 3,3
4503
+ 7,7
4504
+ 5,5
4505
+ 27,27
4506
+ 27,4
4507
+ 6,6
4508
+ 14,14
4509
+ 22,13
4510
+ 10,9
4511
+ 7,25
4512
+ 22,12
4513
+ 27,27
4514
+ 27,27
4515
+ 27,27
4516
+ 14,27
4517
+ 27,10
4518
+ 6,7
4519
+ 27,4
4520
+ 27,5
4521
+ 0,0
4522
+ 25,25
4523
+ 27,27
4524
+ 15,15
4525
+ 27,27
4526
+ 3,2
4527
+ 3,27
4528
+ 20,20
4529
+ 15,15
4530
+ 6,6
4531
+ 27,27
4532
+ 27,27
4533
+ 10,10
4534
+ 15,15
4535
+ 27,27
4536
+ 27,27
4537
+ 6,6
4538
+ 7,7
4539
+ 17,27
4540
+ 0,7
4541
+ 2,27
4542
+ 27,27
4543
+ 15,15
4544
+ 27,27
4545
+ 2,2
4546
+ 14,14
4547
+ 0,0
4548
+ 4,27
4549
+ 27,27
4550
+ 19,7
4551
+ 27,4
4552
+ 22,27
4553
+ 25,24
4554
+ 10,27
4555
+ 5,5
4556
+ 27,27
4557
+ 13,13
4558
+ 27,2
4559
+ 9,9
4560
+ 27,27
4561
+ 2,27
4562
+ 27,27
4563
+ 27,0
4564
+ 0,13
4565
+ 9,26
4566
+ 0,0
4567
+ 10,10
4568
+ 20,27
4569
+ 7,0
4570
+ 27,27
4571
+ 27,7
4572
+ 6,6
4573
+ 6,27
4574
+ 0,4
4575
+ 27,27
4576
+ 25,25
4577
+ 27,27
4578
+ 27,27
4579
+ 14,14
4580
+ 27,27
4581
+ 27,27
4582
+ 6,9
4583
+ 27,27
4584
+ 4,15
4585
+ 18,18
4586
+ 7,26
4587
+ 9,3
4588
+ 0,0
4589
+ 25,0
4590
+ 24,24
4591
+ 10,10
4592
+ 1,1
4593
+ 3,7
4594
+ 27,27
4595
+ 27,27
4596
+ 27,27
4597
+ 27,27
4598
+ 26,26
4599
+ 6,6
4600
+ 4,27
4601
+ 27,27
4602
+ 27,27
4603
+ 2,27
4604
+ 7,7
4605
+ 1,17
4606
+ 4,27
4607
+ 5,4
4608
+ 27,2
4609
+ 27,27
4610
+ 4,4
4611
+ 0,0
4612
+ 1,1
4613
+ 0,0
4614
+ 7,20
4615
+ 2,2
4616
+ 3,2
4617
+ 27,27
4618
+ 6,27
4619
+ 27,3
4620
+ 4,4
4621
+ 27,0
4622
+ 27,27
4623
+ 27,10
4624
+ 14,11
4625
+ 27,27
4626
+ 27,27
4627
+ 27,27
4628
+ 27,0
4629
+ 26,26
4630
+ 25,27
4631
+ 0,0
4632
+ 0,0
4633
+ 18,20
4634
+ 0,15
4635
+ 6,6
4636
+ 27,27
4637
+ 27,17
4638
+ 1,1
4639
+ 27,27
4640
+ 4,4
4641
+ 15,15
4642
+ 7,26
4643
+ 12,3
4644
+ 27,5
4645
+ 27,27
4646
+ 27,6
4647
+ 9,9
4648
+ 15,15
4649
+ 27,0
4650
+ 24,27
4651
+ 27,27
4652
+ 0,0
4653
+ 3,27
4654
+ 27,27
4655
+ 3,27
4656
+ 10,27
4657
+ 27,7
4658
+ 4,27
4659
+ 0,0
4660
+ 0,17
4661
+ 27,27
4662
+ 21,0
4663
+ 20,15
4664
+ 13,13
4665
+ 10,2
4666
+ 15,20
4667
+ 1,1
4668
+ 0,0
4669
+ 4,27
4670
+ 10,18
4671
+ 9,0
4672
+ 10,10
4673
+ 13,7
4674
+ 27,27
4675
+ 1,1
4676
+ 26,0
4677
+ 25,25
4678
+ 10,10
4679
+ 0,0
4680
+ 2,2
4681
+ 6,7
4682
+ 0,0
4683
+ 3,6
4684
+ 27,7
4685
+ 27,7
4686
+ 27,5
4687
+ 27,26
4688
+ 27,27
4689
+ 22,27
4690
+ 7,0
4691
+ 27,27
4692
+ 4,10
4693
+ 13,13
4694
+ 18,18
4695
+ 3,0
4696
+ 0,0
4697
+ 24,24
4698
+ 4,27
4699
+ 10,2
4700
+ 4,5
4701
+ 27,27
4702
+ 26,26
4703
+ 27,27
4704
+ 2,2
4705
+ 5,27
4706
+ 27,27
4707
+ 17,17
4708
+ 27,27
4709
+ 14,14
4710
+ 3,3
4711
+ 17,0
4712
+ 27,27
4713
+ 0,9
4714
+ 17,27
4715
+ 26,27
4716
+ 0,0
4717
+ 27,7
4718
+ 2,2
4719
+ 18,1
4720
+ 15,13
4721
+ 25,7
4722
+ 27,27
4723
+ 0,27
4724
+ 27,0
4725
+ 7,7
4726
+ 4,20
4727
+ 13,0
4728
+ 0,0
4729
+ 27,27
4730
+ 27,27
4731
+ 27,27
4732
+ 2,2
4733
+ 11,4
4734
+ 20,20
4735
+ 2,2
4736
+ 5,27
4737
+ 15,15
4738
+ 14,14
4739
+ 0,0
4740
+ 13,13
4741
+ 20,20
4742
+ 2,3
4743
+ 18,18
4744
+ 27,27
4745
+ 5,5
4746
+ 1,1
4747
+ 2,2
4748
+ 27,15
4749
+ 3,3
4750
+ 27,0
4751
+ 27,27
4752
+ 26,3
4753
+ 0,0
4754
+ 11,3
4755
+ 4,27
4756
+ 19,24
4757
+ 26,26
4758
+ 1,1
4759
+ 3,18
4760
+ 7,7
4761
+ 18,2
4762
+ 27,27
4763
+ 27,4
4764
+ 0,0
4765
+ 3,27
4766
+ 0,0
4767
+ 27,27
4768
+ 27,0
4769
+ 1,1
4770
+ 27,0
4771
+ 10,27
4772
+ 21,0
4773
+ 27,10
4774
+ 12,12
4775
+ 18,18
4776
+ 9,27
4777
+ 6,6
4778
+ 27,27
4779
+ 10,27
4780
+ 10,6
4781
+ 27,10
4782
+ 27,27
4783
+ 27,7
4784
+ 27,27
4785
+ 0,0
4786
+ 3,27
4787
+ 7,7
4788
+ 14,14
4789
+ 4,27
4790
+ 27,27
4791
+ 15,15
4792
+ 27,27
4793
+ 10,10
4794
+ 18,18
4795
+ 27,27
4796
+ 14,14
4797
+ 7,7
4798
+ 7,7
4799
+ 0,0
4800
+ 6,6
4801
+ 0,0
4802
+ 27,27
4803
+ 1,1
4804
+ 7,7
4805
+ 13,27
4806
+ 15,15
4807
+ 3,27
4808
+ 27,27
4809
+ 27,27
4810
+ 27,27
4811
+ 2,2
4812
+ 17,17
4813
+ 7,7
4814
+ 1,1
4815
+ 27,27
4816
+ 20,20
4817
+ 25,10
4818
+ 20,20
4819
+ 27,1
4820
+ 17,13
4821
+ 27,27
4822
+ 0,5
4823
+ 27,27
4824
+ 15,0
4825
+ 27,5
4826
+ 25,24
4827
+ 0,5
4828
+ 10,27
4829
+ 9,25
4830
+ 2,10
4831
+ 6,27
4832
+ 27,1
4833
+ 9,3
4834
+ 27,27
4835
+ 27,3
4836
+ 8,8
4837
+ 27,17
4838
+ 0,0
4839
+ 15,15
4840
+ 27,0
4841
+ 6,27
4842
+ 20,27
4843
+ 6,7
4844
+ 27,27
4845
+ 9,9
4846
+ 4,0
4847
+ 14,14
4848
+ 6,6
4849
+ 27,27
4850
+ 26,25
4851
+ 11,11
4852
+ 27,27
4853
+ 27,27
4854
+ 7,3
4855
+ 27,2
4856
+ 2,27
4857
+ 27,27
4858
+ 20,17
4859
+ 25,2
4860
+ 10,10
4861
+ 27,27
4862
+ 0,0
4863
+ 17,17
4864
+ 24,24
4865
+ 14,14
4866
+ 1,1
4867
+ 27,27
4868
+ 27,27
4869
+ 9,9
4870
+ 14,14
4871
+ 3,27
4872
+ 27,27
4873
+ 6,0
4874
+ 27,27
4875
+ 0,0
4876
+ 27,27
4877
+ 15,15
4878
+ 14,14
4879
+ 10,10
4880
+ 4,27
4881
+ 27,7
4882
+ 17,17
4883
+ 1,1
4884
+ 27,27
4885
+ 8,2
4886
+ 2,2
4887
+ 11,2
4888
+ 27,4
4889
+ 10,27
4890
+ 18,18
4891
+ 4,26
4892
+ 25,25
4893
+ 27,13
4894
+ 27,27
4895
+ 26,3
4896
+ 27,27
4897
+ 27,7
4898
+ 0,0
4899
+ 27,27
4900
+ 27,27
4901
+ 27,27
4902
+ 7,7
4903
+ 3,0
4904
+ 2,2
4905
+ 27,27
4906
+ 27,27
4907
+ 27,27
4908
+ 1,1
4909
+ 27,27
4910
+ 3,3
4911
+ 27,27
4912
+ 0,18
4913
+ 12,3
4914
+ 3,7
4915
+ 0,14
4916
+ 27,27
4917
+ 17,17
4918
+ 27,27
4919
+ 4,27
4920
+ 27,3
4921
+ 13,13
4922
+ 27,27
4923
+ 27,27
4924
+ 7,27
4925
+ 4,27
4926
+ 27,27
4927
+ 15,15
4928
+ 1,1
4929
+ 1,1
4930
+ 27,26
4931
+ 27,4
4932
+ 7,7
4933
+ 26,27
4934
+ 15,15
4935
+ 9,27
4936
+ 27,27
4937
+ 0,0
4938
+ 4,27
4939
+ 27,27
4940
+ 27,27
4941
+ 27,27
4942
+ 6,7
4943
+ 6,6
4944
+ 27,4
4945
+ 0,27
4946
+ 8,20
4947
+ 27,22
4948
+ 0,0
4949
+ 26,7
4950
+ 26,26
4951
+ 20,20
4952
+ 27,27
4953
+ 27,27
4954
+ 27,27
4955
+ 3,2
4956
+ 27,10
4957
+ 1,17
4958
+ 24,24
4959
+ 15,15
4960
+ 10,27
4961
+ 10,11
4962
+ 27,22
4963
+ 4,4
4964
+ 14,13
4965
+ 4,27
4966
+ 27,27
4967
+ 22,27
4968
+ 18,18
4969
+ 27,27
4970
+ 27,27
4971
+ 27,27
4972
+ 27,27
4973
+ 0,0
4974
+ 27,18
4975
+ 27,27
4976
+ 3,27
4977
+ 27,0
4978
+ 27,27
4979
+ 13,4
4980
+ 27,27
4981
+ 27,27
4982
+ 10,27
4983
+ 5,27
4984
+ 27,27
4985
+ 1,1
4986
+ 4,0
4987
+ 17,17
4988
+ 15,15
4989
+ 0,0
4990
+ 27,27
4991
+ 3,3
4992
+ 0,0
4993
+ 7,7
4994
+ 6,26
4995
+ 27,27
4996
+ 15,15
4997
+ 15,5
4998
+ 0,17
4999
+ 7,7
5000
+ 27,27
5001
+ 3,27
5002
+ 1,1
5003
+ 27,10
5004
+ 20,27
5005
+ 15,15
5006
+ 27,27
5007
+ 4,4
5008
+ 0,27
5009
+ 20,8
5010
+ 4,27
5011
+ 27,27
5012
+ 3,14
5013
+ 2,15
5014
+ 27,27
5015
+ 27,27
5016
+ 15,15
5017
+ 20,20
5018
+ 11,27
5019
+ 4,4
5020
+ 8,7
5021
+ 2,2
5022
+ 27,27
5023
+ 3,7
5024
+ 6,6
5025
+ 27,7
5026
+ 2,2
5027
+ 27,24
5028
+ 1,1
5029
+ 1,1
5030
+ 4,4
5031
+ 27,27
5032
+ 5,5
5033
+ 3,2
5034
+ 27,27
5035
+ 27,27
5036
+ 13,8
5037
+ 4,27
5038
+ 27,27
5039
+ 15,15
5040
+ 27,27
5041
+ 27,27
5042
+ 4,4
5043
+ 4,27
5044
+ 22,1
5045
+ 20,27
5046
+ 8,8
5047
+ 7,7
5048
+ 22,27
5049
+ 0,0
5050
+ 0,27
5051
+ 1,1
5052
+ 4,4
5053
+ 3,12
5054
+ 27,10
5055
+ 9,25
5056
+ 18,7
5057
+ 1,1
5058
+ 26,2
5059
+ 0,0
5060
+ 0,18
5061
+ 25,27
5062
+ 11,7
5063
+ 4,27
5064
+ 25,25
5065
+ 8,8
5066
+ 3,2
5067
+ 3,2
5068
+ 27,10
5069
+ 15,15
5070
+ 27,10
5071
+ 6,6
5072
+ 14,14
5073
+ 27,27
5074
+ 18,18
5075
+ 9,25
5076
+ 0,18
5077
+ 14,14
5078
+ 27,5
5079
+ 15,15
5080
+ 27,0
5081
+ 0,0
5082
+ 4,27
5083
+ 22,9
5084
+ 25,25
5085
+ 27,27
5086
+ 27,27
5087
+ 26,27
5088
+ 13,13
5089
+ 12,27
5090
+ 12,27
5091
+ 27,27
5092
+ 0,0
5093
+ 18,18
5094
+ 7,27
5095
+ 12,24
5096
+ 5,5
5097
+ 3,2
5098
+ 18,18
5099
+ 25,25
5100
+ 9,27
5101
+ 15,15
5102
+ 4,27
5103
+ 0,0
5104
+ 4,20
5105
+ 15,15
5106
+ 18,18
5107
+ 27,7
5108
+ 10,27
5109
+ 27,26
5110
+ 3,25
5111
+ 17,17
5112
+ 0,0
5113
+ 0,17
5114
+ 9,27
5115
+ 4,10
5116
+ 27,27
5117
+ 27,27
5118
+ 1,1
5119
+ 13,13
5120
+ 27,7
5121
+ 27,5
5122
+ 0,17
5123
+ 3,27
5124
+ 3,9
5125
+ 3,27
5126
+ 8,20
5127
+ 11,11
5128
+ 27,27
5129
+ 9,27
5130
+ 0,0
5131
+ 5,5
5132
+ 6,7
5133
+ 27,27
5134
+ 27,27
5135
+ 5,20
5136
+ 3,2
5137
+ 27,27
5138
+ 4,27
5139
+ 27,27
5140
+ 10,27
5141
+ 3,3
5142
+ 20,0
5143
+ 6,27
5144
+ 27,27
5145
+ 0,0
5146
+ 7,7
5147
+ 27,27
5148
+ 18,1
5149
+ 6,27
5150
+ 13,15
5151
+ 27,27
5152
+ 0,0
5153
+ 27,7
5154
+ 18,18
5155
+ 20,27
5156
+ 1,0
5157
+ 25,25
5158
+ 27,4
5159
+ 27,0
5160
+ 27,27
5161
+ 25,25
5162
+ 25,25
5163
+ 0,0
5164
+ 3,2
5165
+ 6,7
5166
+ 3,27
5167
+ 0,0
5168
+ 0,0
5169
+ 5,17
5170
+ 2,2
5171
+ 27,27
5172
+ 15,15
5173
+ 5,5
5174
+ 4,27
5175
+ 27,27
5176
+ 2,26
5177
+ 25,6
5178
+ 3,3
5179
+ 27,27
5180
+ 0,0
5181
+ 13,13
5182
+ 27,10
5183
+ 27,27
5184
+ 27,27
5185
+ 24,14
5186
+ 27,13
5187
+ 27,27
5188
+ 27,27
5189
+ 0,0
5190
+ 1,1
5191
+ 15,15
5192
+ 4,4
5193
+ 7,7
5194
+ 2,2
5195
+ 27,20
5196
+ 18,0
5197
+ 13,13
5198
+ 6,27
5199
+ 27,4
5200
+ 27,27
5201
+ 0,0
5202
+ 6,10
5203
+ 27,27
5204
+ 7,7
5205
+ 13,0
5206
+ 4,4
5207
+ 4,4
5208
+ 1,1
5209
+ 11,11
5210
+ 27,27
5211
+ 9,25
5212
+ 27,27
5213
+ 0,0
5214
+ 15,15
5215
+ 7,7
5216
+ 0,27
5217
+ 0,27
5218
+ 25,25
5219
+ 1,1
5220
+ 27,1
5221
+ 0,0
5222
+ 27,27
5223
+ 21,17
5224
+ 5,5
5225
+ 27,27
5226
+ 6,7
5227
+ 27,4
5228
+ 4,9
5229
+ 27,27
5230
+ 27,27
5231
+ 0,0
5232
+ 27,27
5233
+ 27,27
5234
+ 27,27
5235
+ 9,9
5236
+ 27,27
5237
+ 10,27
5238
+ 8,20
5239
+ 7,7
5240
+ 7,7
5241
+ 26,26
5242
+ 10,10
5243
+ 10,27
5244
+ 15,15
5245
+ 4,10
5246
+ 26,26
5247
+ 2,0
5248
+ 1,1
5249
+ 0,0
5250
+ 27,27
5251
+ 27,27
5252
+ 1,1
5253
+ 6,6
5254
+ 0,0
5255
+ 4,27
5256
+ 25,25
5257
+ 2,2
5258
+ 0,18
5259
+ 4,27
5260
+ 27,27
5261
+ 27,27
5262
+ 0,0
5263
+ 27,0
5264
+ 4,27
5265
+ 0,0
5266
+ 20,27
5267
+ 27,27
5268
+ 4,4
5269
+ 6,6
5270
+ 0,0
5271
+ 1,1
5272
+ 27,27
5273
+ 27,27
5274
+ 27,4
5275
+ 6,6
5276
+ 20,20
5277
+ 0,0
5278
+ 7,7
5279
+ 7,6
5280
+ 0,15
5281
+ 27,7
5282
+ 9,25
5283
+ 3,4
5284
+ 27,27
5285
+ 27,15
5286
+ 27,27
5287
+ 7,7
5288
+ 3,15
5289
+ 17,20
5290
+ 27,27
5291
+ 4,4
5292
+ 26,26
5293
+ 27,27
5294
+ 0,0
5295
+ 6,27
5296
+ 27,3
5297
+ 2,27
5298
+ 27,27
5299
+ 27,27
5300
+ 0,1
5301
+ 27,27
5302
+ 3,27
5303
+ 25,25
5304
+ 10,2
5305
+ 13,13
5306
+ 0,0
5307
+ 27,27
5308
+ 18,18
5309
+ 27,27
5310
+ 27,27
5311
+ 3,2
5312
+ 27,5
5313
+ 15,15
5314
+ 7,7
5315
+ 27,27
5316
+ 3,27
5317
+ 22,27
5318
+ 3,27
5319
+ 20,20
5320
+ 10,27
5321
+ 7,15
5322
+ 9,25
5323
+ 4,4
5324
+ 27,27
5325
+ 1,1
5326
+ 27,27
5327
+ 3,2
5328
+ 27,27
5329
+ 4,17
5330
+ 0,0
5331
+ 27,27
5332
+ 4,27
5333
+ 2,3
5334
+ 0,0
5335
+ 1,1
5336
+ 0,4
5337
+ 10,10
5338
+ 25,25
5339
+ 27,27
5340
+ 27,27
5341
+ 25,25
5342
+ 11,2
5343
+ 3,27
5344
+ 25,25
5345
+ 0,27
5346
+ 22,27
5347
+ 5,27
5348
+ 8,27
5349
+ 0,0
5350
+ 27,27
5351
+ 5,3
5352
+ 0,0
5353
+ 25,7
5354
+ 27,7
5355
+ 27,27
5356
+ 27,2
5357
+ 27,27
5358
+ 8,8
5359
+ 2,3
5360
+ 27,7
5361
+ 17,17
5362
+ 27,13
5363
+ 27,27
5364
+ 9,20
5365
+ 4,4
5366
+ 15,15
5367
+ 15,15
5368
+ 9,3
5369
+ 27,27
5370
+ 13,7
5371
+ 3,1
5372
+ 10,10
5373
+ 20,20
5374
+ 5,0
5375
+ 1,27
5376
+ 27,27
5377
+ 6,7
5378
+ 27,27
5379
+ 7,7
5380
+ 4,4
5381
+ 22,22
5382
+ 10,10
5383
+ 2,2
5384
+ 8,27
5385
+ 2,2
5386
+ 0,0
5387
+ 27,27
5388
+ 7,27
5389
+ 3,3
5390
+ 27,27
5391
+ 13,14
5392
+ 0,0
5393
+ 9,27
5394
+ 8,5
5395
+ 27,27
5396
+ 6,3
5397
+ 27,10
5398
+ 1,1
5399
+ 4,27
5400
+ 18,18
5401
+ 26,0
5402
+ 22,27
5403
+ 9,9
5404
+ 27,27
5405
+ 0,0
5406
+ 1,1
5407
+ 0,15
5408
+ 27,27
5409
+ 8,27
5410
+ 15,15
5411
+ 3,14
5412
+ 9,9
5413
+ 27,6
5414
+ 3,0
5415
+ 10,10
5416
+ 7,25
5417
+ 4,27
5418
+ 18,18
5419
+ 15,15
5420
+ 27,27
5421
+ 1,1
5422
+ 27,14
5423
+ 27,27
5424
+ 15,15
5425
+ 4,4
5426
+ 27,27
5427
+ 0,0
5428
+ 27,27
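
The rows in the truncated diff above look like per-example label pairs. As a minimal sketch only: assuming each row is a "predicted_label,true_label" pair (both the column meaning and the file name `predictions.csv` are assumptions, not confirmed by the diff), such a file could be scored with nothing but the standard library:

```python
# Sketch: score a CSV of "predicted,true" integer label pairs.
# ASSUMPTIONS: file name and column order are hypothetical;
# the diff only shows pairs of integers in the range 0-27.
from collections import Counter

def score_label_pairs(path="predictions.csv"):
    pairs = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            pred, true = line.split(",")
            pairs.append((int(pred), int(true)))

    correct = sum(1 for p, t in pairs if p == t)
    accuracy = correct / len(pairs)

    # (predicted, true) pair counts, i.e. a sparse confusion matrix.
    confusion = Counter(pairs)
    return accuracy, confusion

if __name__ == "__main__":
    acc, conf = score_label_pairs()
    print(f"accuracy: {acc:.4f} over {sum(conf.values())} rows")
```

Aggregating the pairs this way would reproduce the kind of summary artifacts committed alongside the data (confusion_matrix.png, classification_report.png), though the exact scripts used to generate those images are not shown in this part of the diff.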
ui.png ADDED

Git LFS Details

  • SHA256: 85da188ac70273705aa8806c89eec9dd7840c7eeb88b020137df0ed4b71d4ee3
  • Pointer size: 131 Bytes
  • Size of remote file: 122 kB