import io
import re
from typing import List, Optional, Tuple

import pandas as pd
import uvicorn
import yaml
from fastapi import FastAPI, File, HTTPException, Query, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

app = FastAPI()
# Load the column configuration once at startup
with open("column_config.yaml") as f:
    COLUMN_CONFIG = yaml.safe_load(f)
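# The code below reads the keys 'type', 'synonyms', 'format', and 'allowed'
# from each column entry. An illustrative (hypothetical) column_config.yaml
# consistent with those accesses might look like:
#
#   columns:
#     order_date:
#       type: datetime
#       format: "%d/%m/%Y"
#       synonyms: ["Order Date", "data_pedido"]
#     total:
#       type: numeric
#       synonyms: ["Amount"]
#     status:
#       type: categorical
#       allowed: ["open", "closed"]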
# Map a pandas dtype to a frontend-facing column type
def detect_column_type(dtype):
    if pd.api.types.is_datetime64_any_dtype(dtype):
        return "datetime"
    elif pd.api.types.is_numeric_dtype(dtype):
        return "number"
    return "text"
# Map raw header names to canonical column names via the configured synonyms
def normalize_column_names(column_names: List[str]) -> List[str]:
    normalized = []
    for raw_col in column_names:
        # Sanitize: collapse runs of non-word characters to "_" and lowercase
        sanitized = re.sub(r'[\W]+', '_', raw_col.strip()).lower().strip('_')
        for config_col, config in COLUMN_CONFIG['columns'].items():
            synonyms = [
                re.sub(r'[\W]+', '_', s.strip()).lower().strip('_')
                for s in [config_col] + config.get('synonyms', [])
            ]
            if sanitized in synonyms:
                normalized.append(config_col)
                break
        else:
            # No synonym matched; keep the sanitized name as-is
            normalized.append(sanitized)
    return normalized
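# For example, assuming the illustrative config sketched above:
#   normalize_column_names(["Order Date", " TOTAL! "])  ->  ["order_date", "total"]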
# Data cleaning: normalize headers, coerce types, clip outliers, deduplicate.
# A single pass per column replaces the original redundant loops, which also
# re-parsed datetime columns after formatting and leaked NaN into the output.
def clean_data(df: pd.DataFrame) -> pd.DataFrame:
    df.columns = normalize_column_names(df.columns)
    for col in df.columns:
        if col not in COLUMN_CONFIG['columns']:
            continue
        config = COLUMN_CONFIG['columns'][col]
        col_type = config.get('type', 'text')
        if col_type == 'datetime':
            # Parse with the configured format, if any; unparseable values become NaT
            df[col] = pd.to_datetime(df[col], errors='coerce', format=config.get('format'))
            # Serialize to ISO 8601 so the values survive JSON encoding
            df[col] = df[col].dt.strftime('%Y-%m-%dT%H:%M:%SZ')
        elif col_type == 'numeric':
            df[col] = pd.to_numeric(df[col], errors='coerce').astype(float)
            # Clip outliers to the 1.5 * IQR fences
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            df[col] = df[col].clip(lower=q1 - 1.5 * iqr, upper=q3 + 1.5 * iqr)
        elif col_type == 'categorical':
            # Values outside the allowed set become missing
            allowed = config.get('allowed', [])
            df[col] = df[col].where(df[col].isin(allowed), None)
        elif col_type == 'text':
            # Noisy text: trim whitespace and lowercase
            df[col] = df[col].astype('string').str.strip().str.lower()
    # Drop duplicate records
    df.drop_duplicates(inplace=True)
    # Replace all missing markers (NaN/NaT/NA) with None so they render as JSON null
    return df.astype(object).where(df.notna(), None)
# Read the uploaded file and return both the raw and the cleaned DataFrame
def process_file(file: UploadFile, sheet_name: Optional[str] = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
    try:
        content = file.file.read()
        extension = file.filename.split('.')[-1].lower()
        if extension == 'csv':
            df = pd.read_csv(io.BytesIO(content))
        elif extension == 'xlsx':
            if sheet_name is None:
                sheet_name = 0  # Default to the first sheet
            df = pd.read_excel(io.BytesIO(content), sheet_name=sheet_name)
        else:
            raise HTTPException(400, "Unsupported file format")
        # Clean a copy so the raw DataFrame is preserved
        return df, clean_data(df.copy())
    except HTTPException:
        # Re-raise as-is so the 400 above is not rewrapped as a 500
        raise
    except Exception as e:
        raise HTTPException(500, f"Error processing file: {str(e)}")
# Endpoint: upload a file and return the cleaned data in tabular JSON form
@app.post("/process-file")
async def process_file_endpoint(file: UploadFile = File(...), sheet_name: Optional[str] = Query(None)):
    try:
        raw_df, df = process_file(file, sheet_name)
        columns = [{
            "name": col,
            "type": detect_column_type(df[col].dtype)
        } for col in df.columns]
        rows = []
        for idx, row in df.iterrows():
            cells = {}
            for col, val in row.items():
                cells[col] = {
                    "value": val,
                    # Render missing values as an empty string instead of "None"
                    "displayValue": "" if val is None else str(val),
                    "columnId": col
                }
            rows.append({"id": str(idx), "cells": cells})
        return JSONResponse(
            content={
                "data": {
                    "columns": columns,
                    "rows": rows
                },
                "metadata": {
                    "totalRows": len(df),
                    "processedAt": pd.Timestamp.now().isoformat()
                }
            })
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(500, f"Error: {str(e)}")
# CORS configuration (fully permissive; suitable for development)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=True)