# -*- coding: utf-8 -*-
"""using_dataset_hugginface.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""

"""**Hugginface loggin for push on Hub**"""
###
#
#  Used bibliografy:
#    https://huggingface.co/learn/nlp-course/chapter5/5
#
###

import os
import time
import math
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from pathlib import Path
import pandas as pd
import mysql.connector

# Load the tokenizer used to count tokens
from transformers import AutoTokenizer

HF_TOKEN = ''
DATASET_TO_LOAD = 'PlanTL-GOB-ES/pharmaconer'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'

# Log in to Hugging Face
login(token=HF_TOKEN)
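
# A safer alternative sketch: read the token from an environment variable
# instead of hard-coding it in the script (assumes an HF_TOKEN variable is
# exported in the environment; os is already imported above):
# login(token=os.environ['HF_TOKEN'])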

dataset_CODING = load_dataset(DATASET_TO_LOAD)
print(dataset_CODING)
royalListOfCode = {}
issues_path = 'dataset'
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
DATASET_SOURCE_ID = '3'
# Resolve the directory that contains this script
path = Path(__file__).parent.absolute()

'''
  References:
    https://www.w3schools.com/python/python_mysql_getstarted.asp
    https://www.w3schools.com/python/python_mysql_select.asp

'''
# Connect to the local MySQL database holding the ICD-10 code table
mydb = mysql.connector.connect(
  host="localhost",
  user="root",
  password="",
  database="icd10_dx_hackatonnlp"
)



def getCodeDescription(labels_of_type):
  """
    Look up the long description associated with each ICD-10 code
    in the icd10_dx_order_code table.
  """
  icd10CodeDict = {}
  mycursor = mydb.cursor()

  for codeIcd10 in labels_of_type:
    # Codes without a decimal part are stored with a '.0' suffix
    if codeIcd10.find('.') == -1:
      codeIcd10 += '.0'

    # Use a parameterized query to avoid SQL injection
    mycursor.execute(
      "SELECT dx_code, long_desc FROM `icd10_dx_order_code` WHERE dx_code = %s LIMIT 1;",
      (codeIcd10,)
    )

    for code, description in mycursor.fetchall():
      icd10CodeDict[code] = description

  return icd10CodeDict
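
# A minimal usage sketch for getCodeDescription (the ICD-10 codes below are
# illustrative placeholders, not taken from the dataset):
# descriptions = getCodeDescription(['I10', 'E11.9'])
# for code, desc in descriptions.items():
#   print(code, '->', desc)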


# Schema of each corpus row:
#
# raw_text: text associated with the document, question, clinical case, or other kind of information.
#
# topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)
#
# speciality: (medical specialty the raw_text relates to, e.g. cardiology, surgery, others)
#
# raw_text_type: (can be clinic_case, open_text, question)
#
# topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
#
# source: identifier of the source associated with the document, as it appears in the README and the dataset description.
#
# country: identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
cantemistDstDict = {
  'raw_text': '',
  'topic': '',
  'speciallity': '',
  'raw_text_type': 'clinic_case',
  'topic_type': '',
  'source': DATASET_SOURCE_ID,
  'country': 'es',
  'document_id': ''
}

totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
countOriginalDocument = 0

for iDataset in dataset_CODING:
    if iDataset == 'train':
      for item in dataset_CODING[iDataset]:
        idFile = str(item['id'])
        # Join the token list back into a single raw-text string
        text = " ".join(item['tokens'])

        # Find the topic or diagnostic classification for the text
        countOriginalDocument += 1
        newCorpusRow = cantemistDstDict.copy()

        # Keep a running count of tokens across the whole corpus
        listOfTokens = tokenizer.tokenize(text)
        currentSizeOfTokens = len(listOfTokens)
        totalOfTokens += currentSizeOfTokens

        newCorpusRow['raw_text'] = text
        newCorpusRow['document_id'] = idFile
        corpusToLoad.append(newCorpusRow)
        
df = pd.DataFrame.from_records(corpusToLoad)

if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
  os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")


df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
        f"Downloaded all the issues for {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)

print('Documents in the dataset: ', countOriginalDocument)
print('Duplicated documents in the dataset: ', countCopySeveralDocument)
print('Total number of tokens: ', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")  # or Path('./doc.txt')
size = file.stat().st_size
print ('File size on Kilobytes (kB)', size >> 10)  # 5242880 kilobytes (kB)
print ('File size on Megabytes  (MB)', size >> 20 ) # 5120 megabytes (MB)
print ('File size on Gigabytes (GB)', size >> 30 ) # 5 gigabytes (GB)
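
# A small human-readable file-size helper, as a sketch (hypothetical helper,
# not part of the original pipeline; uses binary units to match the shifts above):
def humanReadableSize(num_bytes):
  # Walk up the binary units until the value drops below 1024
  for unit in ('B', 'KiB', 'MiB', 'GiB'):
    if num_bytes < 1024:
      return f"{num_bytes:.1f} {unit}"
    num_bytes /= 1024
  return f"{num_bytes:.1f} TiB"

print('File size: ', humanReadableSize(size))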

## Merge the local JSONL export with the dataset already on the Hub
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")

print (' Local Dataset ==> ')
print(local_spanish_dataset)

try:
  # Append the freshly built local rows to the existing Hub dataset
  spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
  spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
  # The Hub dataset does not exist yet (or could not be loaded), so start from the local one
  spanish_dataset = local_spanish_dataset

spanish_dataset.push_to_hub(DATASET_TO_UPDATE)

print(spanish_dataset)

# Augmenting the dataset

# Important: if elements already exist on DATASET_TO_UPDATE we must update
# them in place and check for repeated elements (see the deduplication
# sketch after the commented push below).

#spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
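
# A minimal deduplication sketch for the note above (an assumption:
# document_id plus source uniquely identify a record; verify against the
# dataset README before pushing). Kept commented out, like the push above:
# from datasets import Dataset
# merged_df = spanish_dataset.to_pandas()
# merged_df = merged_df.drop_duplicates(subset=['document_id', 'source'])
# spanish_dataset = Dataset.from_pandas(merged_df, preserve_index=False)
# spanish_dataset.push_to_hub(DATASET_TO_UPDATE)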