seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
---|---|---|---|---|---|---|---|---|---|---|---|---|---
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
7824439491
|
#LIBRARIES TO USE
import pandas as pd
from sklearn.impute import SimpleImputer
import numpy as np
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split #library to split the data into training and test sets
from sklearn.linear_model import LinearRegression #library to build the linear regression
from sklearn import metrics
#PRELIMINARY ANALYSIS
"""
En el analisis previo con solo mirar el archivo, puedo deducir que es un archivo que contiene
edad de una persona, sexo de la persona solicitante del seguro,bmi(segun lo que googlie es un reembolso que se le puede dar),
cuantos hijos tiene, si es fumador o no, y el precio del seguro a abonar.
De esto puedo inferir que mi prediccion va a ser con una regresion lineal multiple ya que la variable dependiete es el cargo del seguro, y es afectada
por todas las demas columnas, es decir, un seguro aumentara por todas las condiciones anteriores(edad, bmi, hijos, si es fumador o no y de que zona es la persona solicitante)
"""
#-------------LOAD THE FILE TO USE -------------------------------
ruta =r'Unsam.Clase.12.4.6.2021\Unsam.Clase.12.4.6.2021\cvs_para_el_TP\seguro.csv'
dataSet = pd.read_csv(ruta) #file to use
#-----------------ANALYZE MY DATA ---------------------
print(dataSet)
#print("CANTIDAD DE FILAS Y COLUMNAS")
#print(dataSet.shape)
#no hace falta hacer el shape ya que al imprimir el dataSet, me dice cuantas filas y columnas tengo
print(dataSet.describe())
#check whether there are NaN values in my dataSet
print("Valores null o nan en el dataSet: ")
print(dataSet.isna().values.any())
print(dataSet.isnull().values.any())
#returns False, so there are no NaN or null values
#-------------SEPARATE THE VARIABLES I WILL USE------------
X = dataSet[['age','sex','bmi','children','smoker','region' ]].values
Y = dataSet['charges'].values
print("X:\n")
print(X) #matrix / independent variables
print("Y:\n")
print(Y) #vector / dependent variable
#----------------START PREPARING THE DATA-------------
"""
La edad y el bmi, son valores numericos ya manipulables , por ende , los dejo como estan para poder realizar mi analisis
"""
#ENCODE THE SEX COLUMN AS 1 AND 0
labelencoder_X=LabelEncoder()
X[:,1]=labelencoder_X.fit_transform(X[:,1])
#ENCODE THE SMOKER COLUMN AS 1 AND 0
labelencoder_X=LabelEncoder()
X[:,4]=labelencoder_X.fit_transform(X[:,4])
#ENCODE THE SOUTH/NORTH REGION COLUMN (WITH EAST AND WEST ORIENTATION)
labelencoder_X=LabelEncoder()
X[:,-1]=labelencoder_X.fit_transform(X[:,-1])
print("Matriz X luego de preparacion de datos : \n")
print(X)
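#NOTE: LabelEncoder gives 'region' an arbitrary ordinal order (0..3). A hedged alternative
#sketch (not part of the original flow) would be one-hot encoding the categoricals:
#X_onehot = pd.get_dummies(dataSet[['age','sex','bmi','children','smoker','region']],
#                          columns=['sex','smoker','region'], drop_first=True).values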
#TO ASK: DOES AGE NEED TO BE SCALED?
#-------------PLOT MY DEPENDENT VARIABLE TO DRAW SOME CONCLUSIONS -----------
plt.figure(figsize=(10,5))
plt.tight_layout()
plt.title('Densidad de mi variable Dependiente')
seabornInstance.distplot(dataSet['charges'], color = 'lightblue') #distplot is deprecated in recent seaborn; histplot/displot are its replacements
plt.show()
"""
De este Grafico podemos concluir que mi variable dependiente 'charges' que seria el precio a pagar del seguro,
varia entre 0 y 70000 , y la mayor densidad(mayor cantidad de match) las hace desde el 0 hasta el 10000 aproximadamente.
"""
#------------------SPLIT THE DATA INTO TRAINING AND TEST -----------------
#80 percent of my data for training and 20 for test
X_train , X_test, Y_train, Y_test = train_test_split(X , Y , test_size = 0.2, random_state=0)
#start training my model
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
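#Optional sanity check (sketch): R^2 of the fitted model on the held-out data
#print('R^2 on test:', regressor.score(X_test, Y_test))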
#------------------DROP THE CHARGES COLUMN (THE DEPENDENT VARIABLE)-----------
dataFrame= dataSet.drop(['charges'], axis = 1) #drop my dependent variable from my data frame
dataFrame = dataFrame.T #swap rows and columns; COULD ALSO BE transpose()
dataFrame = dataFrame.index #keep only the labels in my dataframe
print(dataFrame) #print the labels to verify they are stored correctly
#-----------------FIND THE BEST COEFFICIENTS FOR MY FEATURES----------------
coeficiente_dataFrame = pd.DataFrame(regressor.coef_, dataFrame, columns= ['Coeficiente'])
print(coeficiente_dataFrame)
#after printing I conclude it swapped rows and columns and each feature has its corresponding coefficient
#---------------PREDICTION--------------------
y_prediccion = regressor.predict(X_test)
#grab my dataFrame, where my labels are stored
dataFrame = pd.DataFrame({'Actual': Y_test, 'Prediccion': y_prediccion})
dataFrame_prediccion = dataFrame.head(30)
print(dataFrame_prediccion)
#-------------------PLOT MY PREDICTION -----------------
dataFrame_prediccion.plot(kind='bar',figsize=(10,8)) #TELL IT THIS IS A BAR CHART, AND THE FIGURE SIZE
plt.title('Actual vs Prediccion')
plt.grid(which='major', linestyle='-', linewidth='0.5', color='black')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
#----------------CHECK MY ALGORITHM'S PERFORMANCE ---------------
print('Promedio de Error Absoluto:', metrics.mean_absolute_error(Y_test, y_prediccion))
print('Error Cuadratico Medio:', metrics.mean_squared_error(Y_test, y_prediccion))
print('Raiz del Error Cuadratico Medio:', np.sqrt(metrics.mean_squared_error(Y_test, y_prediccion)))
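#Sketch: to judge these errors against the target's scale, compare the RMSE with the mean charge:
#print('RMSE / mean(charges):', np.sqrt(metrics.mean_squared_error(Y_test, y_prediccion)) / Y_test.mean())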
#FINAL CONCLUSION
"""
Por el analisis hecho, podemos deducir que la prediccion no es nada buena, ya que los promedios de error
superan ampliamente el 10 porciento, es aproximadamente, un 15 porciento (siendo generoso), no es un analisis muy exacto, pero para nuestro caso
es aceptable(porque use regresion lineal multiple, entonces entre mas datos afecten a mi variable dependiente , menos me voy a acercar a una prediccion acertada),
ya que las columnas en las que supera ese porcentaje son la 14 , la 20 y la 23, que siendo generoso tendrian aproximadamente un 25 porciento extra.
"""
"""
PD: Perdon por mi manera de hablar o escribir pero la verdad que en las conclusiones tarde mas que en el codigo jajaja.
Si hay algo que deba cambiar o mejorar porfavor diganmelo
Muchas gracias por TODO
Atte : Alex Sosa :)
"""
|
alextsosa17/Analisis-de-Datos-y-prediccion--Python
|
TpFinalAlexSosa.py
|
TpFinalAlexSosa.py
|
py
| 6,299 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_absolute_error",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 126,
"usage_type": "name"
}
] |
43918724986
|
import streamlit as st
import pandas as pd
import plotly.express as px
import seaborn as sns
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
st.set_option('deprecation.showPyplotGlobalUse', False)
# Loading dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])
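# Optional sketch (assumes a Streamlit version with st.cache_data): wrapping the download
# in a cached loader would fetch the CSV once per session instead of on every rerun:
# @st.cache_data
# def load_iris(url):
#     return pd.read_csv(url, names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])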
st.title('Exploratory Data Analysis of the Iris Dataset')
st.header('This app allows you to explore the Iris dataset and visualize the data using various plots.')
st.subheader("DataFrame")
st.dataframe(df)
selected_column = st.sidebar.selectbox('Select a column to visualize', df.columns)
st.write("Histogram Plots")
sns.histplot(df[selected_column])
st.pyplot()
st.write("Scatter plot")
x_axis = st.sidebar.selectbox('Select the x-axis', df.columns)
y_axis = st.sidebar.selectbox('Select the y-axis', df.columns)
fig = px.scatter(df, x=x_axis, y=y_axis)
st.plotly_chart(fig)
st.write("Pair Plot")
sns.pairplot(df, hue='class')
st.pyplot()
st.write("Description of the data")
st.table(df.describe())
st.header('Correlation Matrix')
corr = df.drop(columns=['class']).corr()  # keep corr() on numeric columns only; the 'class' labels are strings
sns.heatmap(corr, annot=True)
st.pyplot()
st.header('Boxplot')
fig = px.box(df, y=selected_column)
st.plotly_chart(fig)
selected_class = st.sidebar.selectbox('Select a class to visualize', df['class'].unique())
if st.sidebar.button('Show Violin Plot'):
fig = px.violin(df[df['class'] == selected_class], y=selected_column)
st.plotly_chart(fig)
|
avrabyt/Holiday-coding-session
|
streamlit_app.py
|
streamlit_app.py
|
py
| 1,556 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "ssl._create_default_https_context",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "streamlit.set_option",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "streamlit.write",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "seaborn.histplot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.pyplot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "plotly.express.scatter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "seaborn.pairplot",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "streamlit.pyplot",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "streamlit.table",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "streamlit.pyplot",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "plotly.express.box",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.button",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "plotly.express.violin",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 53,
"usage_type": "call"
}
] |
27937806135
|
#!/usr/bin/python
# coding: utf-8
import numpy as np
import cv2
import csv
import os
import shutil
import logging
import base64
def to_image_string(image_filepath):
    # read the image bytes and return them base64-encoded
    # (str.encode("base64") was Python 2 only; the base64 module is the Python 3 way)
    with open(image_filepath, "rb") as f:
        return base64.b64encode(f.read())
def from_base64(base64_data):
    # decode the base64 payload back to raw bytes and let OpenCV rebuild the image
    # (np.fromstring is deprecated; np.frombuffer is its replacement)
    nparr = np.frombuffer(base64.b64decode(base64_data), np.uint8)
    return cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
# clean all non-alphanumeric characters
def strip(string):
words = string.split()
words = [word for word in words if "#" not in word]
string = " ".join(words)
clean = ""
for c in string:
if str.isalnum(c) or (c in [" ", ".", ","]):
clean += c
return clean
# creating CSV header
def create_csv(filename):
with open(filename, "w+", newline="", encoding="utf-8") as save_file:
writer = csv.writer(save_file)
writer.writerow(["Author", "uTime", "Text"])
def write_to_csv(filename, data):
with open(filename, "a+", newline="", encoding="utf-8") as save_file:
writer = csv.writer(save_file)
writer.writerow(data)
def empty_folder(folder):
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
empty_folder(file_path)
except Exception as e:
logging.info("Failed to delete %s. Reason: %s" % (file_path, e))
def move_file(source, dest):
shutil.move(source, dest)
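# Usage sketch (hypothetical file names, just to illustrate the helpers above):
# create_csv("comments.csv")
# write_to_csv("comments.csv", ["alice", "1620000000", strip("hello #world")])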
|
Zyniel/DansePlanningManager
|
src/app/utils.py
|
utils.py
|
py
| 1,569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.fromstring",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.imdecode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_ANYCOLOR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.path.islink",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 59,
"usage_type": "call"
}
] |
12267302602
|
# -*- coding: utf-8 -*-
import scrapy
from collections import OrderedDict
class BriefingEarningsSpider(scrapy.Spider):
name = 'briefing_earnings'
allowed_domains = ['www.briefing.com']
start_urls = ['https://www.briefing.com/Inv/content/Auth/Calendar/Earnings/week1.htm'] # Current week (week1)
def parse(self, response):
dates_lst = response.xpath('//*[@class="calDATE"]/text()').getall() # Get list of days (dates)
dates = {dates_lst[day]: day for day in range(len(dates_lst))}
        dates_sort = OrderedDict(sorted(dates.items(), key=lambda x: x[1]))  # Ordered dict to preserve the day order
for i, day in enumerate(dates_sort):
block = response.xpath('//*[@class="calDATE"]/following-sibling::ul')[i] # Block for day
            events_lst = block.xpath('.//div[contains(@class,"calEVENT")]')  # Blocks, one per ticker event
tickets = OrderedDict()
for event in events_lst:
ticket = event.xpath('.//span/@data-ticker-search').get()
name = event.xpath('.//strong/text()').get()
surprise_value = event.xpath(
'.//span[contains(text(), "Surprise:")]/following-sibling::span/text()').get()
act_value = event.xpath('.//span[contains(text(), "Act:")]/following-sibling::span/text()').get()
cons_value = event.xpath('.//span[contains(text(), "Cons:")]/following-sibling::span/text()').get()
tickets.update({ticket: {'name': name,
'surprise_value': surprise_value,
'actual_value': act_value,
'consensus_value': cons_value}})
dates_sort.update({day: tickets}) # Add all tickets with values for day
yield dates_sort
|
kompotkot/WebScraper-Stocksinplay
|
stocksinplay/spiders/briefing_earnings.py
|
briefing_earnings.py
|
py
| 1,854 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "collections.OrderedDict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 21,
"usage_type": "call"
}
] |
38902423747
|
import time
import subprocess
import digitalio
import board
from PIL import Image, ImageDraw, ImageFont
import adafruit_rgb_display.st7789 as st7789
import pynmea2
import sys
from subprocess import Popen, PIPE
import serial
import io
# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = None
# COLORS
ORANGE = "#ffa600"
WHITE = "#FFFFFF"
# Config for display baudrate (default max is 24mhz):
BAUDRATE = 64000000
# Setup SPI bus using hardware SPI:
spi = board.SPI()
# Create the ST7789 display:
disp = st7789.ST7789(
spi,
cs=cs_pin,
dc=dc_pin,
rst=reset_pin,
baudrate=BAUDRATE,
width=135,
height=240,
x_offset=53,
y_offset=40,
)
# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
height = disp.width # we swap height/width to rotate it to landscape!
width = disp.height
image = Image.new("RGB", (width, height))
rotation = 90
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image, rotation)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Alternatively load a TTF font. Make sure the .ttf font file is in the
# same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype("/usr/share/fonts/truetype/noto/NotoMono-Regular.ttf", 24)
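# NOTE: font.getsize() used further below was removed in Pillow 10; on newer Pillow a
# rough equivalent (sketch) is the height from font.getbbox(text): bbox[3] - bbox[1].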
# Turn on the backlight
backlight = digitalio.DigitalInOut(board.D22)
backlight.switch_to_output()
backlight.value = True
while True:
iterator = 0
# Get output from std
with Popen(["gpspipe", "/dev/ttyACM0", "-r"], stdout=PIPE, bufsize=1,
universal_newlines=True) as p:
for line in p.stdout:
iterator = iterator + 1
# counting up to 4 lines ignores the headers that would otherwise be unfit for parsing
if(iterator >= 4):
gpsmsg = pynmea2.parse(line)
# handling errors is critical - program will fail without this step
try:
latitude = gpsmsg.lat
longitude = gpsmsg.lon
#altitude = gpsmsg.alt
y = top
if(latitude != 0 and latitude != "null" and latitude != "NULL" and latitude != "" and latitude != " "):
#print(latitude)
draw.rectangle((0, 0, width, height), outline=0, fill=0)
#Write GPS data to screen
#y = top
draw.text((x, y), "LAT: " + latitude, font=font, fill=WHITE)
y += font.getsize("LAT")[1]
if(longitude != 0 and longitude != "null" and longitude != "NULL" and longitude != "" and longitude != " "):
draw.text((x, y), "LON: " + longitude, font=font, fill=WHITE)
y += font.getsize("LON")[1]
#if(altitude != 0 and altitude != "null" and altitude != "NULL" and altitude != "" and altitude != " "):
#draw.text((x, y), "ALT: " + altitude, font=font, fill=WHITE)
#y += font.getsize("ALT")[1]
except:
print("cannot parse that")
#pass
disp.image(image, rotation)
time.sleep(0.1)
|
vwls/toolbox
|
gps_data_to_screen.py
|
gps_data_to_screen.py
|
py
| 4,226 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "digitalio.DigitalInOut",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "board.CE0",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "digitalio.DigitalInOut",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "board.D25",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "board.SPI",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "adafruit_rgb_display.st7789.ST7789",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "adafruit_rgb_display.st7789",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "digitalio.DigitalInOut",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "board.D22",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pynmea2.parse",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 122,
"usage_type": "call"
}
] |
33415585016
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
import scipy.optimize as optimize
# Opening image
img = cv.imread("red.png")
# Uncomment this and run the program to make sure the
# convex_hull_pointing_up algorithm works
# img = cv.rotate(img, cv.ROTATE_180)
# OpenCV stores images as BGR by default
# so the following two lines flip the color channels
# to RGB and HSV
img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# Create the environment of the picture
plt.subplot(1, 1, 1)
plt.imshow(img_rgb)
# Defining thresholds to isolate the HSV pixels that
# have the desired color
img_thresh_low = cv.inRange(img_hsv, np.array([0, 135, 135]), np.array([15, 255, 255]))
img_thresh_high = cv.inRange(img_hsv, np.array([159, 135, 135]), np.array([179, 255, 255]))
# Add the two threshold maps together
img_thresh = cv.bitwise_or(img_thresh_low, img_thresh_high)
# Use erosion followed by dilation to remove noise
kernel = np.ones((5, 5))
img_thresh_opened = cv.morphologyEx(img_thresh, cv.MORPH_OPEN, kernel)
# Blur the image slightly
img_thresh_blurred = cv.medianBlur(img_thresh_opened, 5)
# Find edges with the Canny edge detection algorithm
img_edges = cv.Canny(img_thresh_blurred, 70, 255)
# Get contours
contours, _ = cv.findContours(np.array(img_edges), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# Approximate contours using the Douglas-Peucker algorithm
approx_contours = []
for c in contours:
approx = cv.approxPolyDP(c, 5, closed=True)
approx_contours.append(approx)
# Find convex hulls of the contours
all_convex_hulls = []
for ac in approx_contours:
all_convex_hulls.append(cv.convexHull(ac))
# Remove any hulls with more than 10 or less than 3 points
convex_hulls_3to10 = []
for ch in all_convex_hulls:
if 3 <= len(ch) <= 10:
convex_hulls_3to10.append(cv.convexHull(ch))
# Define a function to check if a hull is pointing up
def convex_hull_pointing_up(ch: np.ndarray) -> bool:
points_above_center, points_below_center = [], []
_, y, _, h = cv.boundingRect(ch)
vertical_center = y + h / 2
for point in ch:
if point[0][1] < vertical_center:
points_above_center.append(point)
elif point[0][1] >= vertical_center:
points_below_center.append(point)
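    # the hull "points up" when the bounding box of the points above the vertical
    # center fits horizontally inside the bounding box of the points below it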
x_above, _, w_above, _ = cv.boundingRect(np.array(points_above_center))
x_below, _, w_below, _ = cv.boundingRect(np.array(points_below_center))
return x_above <= x_below + w_below and x_above + w_above <= x_below + w_below \
and x_above >= x_below and x_above + w_above >= x_below
cones = []
bounding_rects = []
# Filter out the contours that aren't pointing up
for ch in convex_hulls_3to10:
if convex_hull_pointing_up(ch):
cones.append(ch)
rect = cv.boundingRect(ch)
bounding_rects.append(rect)
img_res = img_rgb.copy()
# Draw rectangles around the identified cones
for rect in bounding_rects:
x, y, w, h = rect
cv.rectangle(img_res, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Fit best-fit lines to the left and right sides of the screen
cone_points_left = [(rect[0] + rect[2] / 2, rect[1] + rect[3] / 2) for rect in bounding_rects if rect[0] + rect[2] / 2 < img_res.shape[1] / 2]
cone_points_right = [(rect[0] + rect[2] / 2, rect[1] + rect[3] / 2) for rect in bounding_rects if rect[0] + rect[2] / 2 > img_res.shape[1] / 2]
def least_squares(x, y):
# Create the least squares objective function.
def func(x, a, b):
return a * x + b
popt, pcov = optimize.curve_fit(func, x, y)
return popt
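# Equivalent sketch using numpy directly (same first-degree least-squares fit):
# a, b = np.polyfit(x, y, 1)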
# Get best fit lines for these points
a1, b1 = least_squares(np.array([i[0] for i in cone_points_left]), np.array([i[1] for i in cone_points_left]))
a2, b2 = least_squares(np.array([i[0] for i in cone_points_right]), np.array([i[1] for i in cone_points_right]))
# Draw the best-fit lines on the image
cv.line(img_res, [0, int(b1)], [3000, int((3000 * a1) + b1)], (255, 1, 1), 5)
cv.line(img_res, [0, int(b2)], [3000, int((3000 * a2) + b2)], (255, 1, 1), 5)
# Display and save the final output image
plt.imshow(img_res)
plt.savefig("answer.png")
plt.show()
|
IamParvSinghal/Wisconsin_Autonomous
|
CV.py
|
CV.py
|
py
| 4,280 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "cv2.inRange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_or",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "cv2.medianBlur",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.approxPolyDP",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.convexHull",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.convexHull",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "cv2.boundingRect",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.curve_fit",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "scipy.optimize",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
}
] |
11296078217
|
import requests
import json
data = {
"a": "GetBidYiDong",
"Token": "29db4b581d67ec1c46a231e09e919671",
"c": "StockBidYiDong",
"UserID": 19,
"day": "20171026"
}
url = "https://hq.kaipanla.com/w1/api/index.php"
response = requests.post(url, data)
response.encoding = "unicode_escape"
result = response.text
print(result)
result = result.replace("zhangfu", "涨幅").replace("BidAmount", "成交").replace("sjltp", "市值")\
.replace("BidAmount", "净额").replace("BuyP", "大单占比").replace("Buy100", "买入")\
.replace("Sell100", "卖出").replace("BidAmount", "净额").replace("Plate", "概念")
for p in json.loads(result)["List"]:
print("--------------------------------------------------------------------------------")
print(p)
|
mykright/auto_stock_search
|
竞价.py
|
竞价.py
|
py
| 778 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
}
] |
70063403388
|
from pwn import *
from LibcSearcher import *
context.log_level = 'debug'  # pwntools' context, imported via 'from pwn import *'
# p=process('./pwn')
p=remote('t.ctf.qwq.cc',49468)
pause()
elf=ELF('./pwn')
context.arch ='amd64'
context.bits=64
shellcode=asm('push 0x68;mov rax ,0x68732f6e69622f;push rax;mov rdi,rsp;xor rsi, rsi;xor rdx, rdx;xor rax,rax;add rax,0x3b;syscall')
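# The shellcode builds "/bin/sh" on the stack and invokes the execve syscall
# (rax = 0x3b) with rsi = rdx = 0, i.e. execve("/bin/sh", NULL, NULL).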
p.recvuntil(b'Do u know what\'s is it?\n')
payload=shellcode.ljust(0x38,b'a')+b'\x2a'
# payload=b'a'*0x30#+p64(0x7ffdadf3ddf0)
p.send(payload)
p.interactive()
|
CookedMelon/mypwn
|
NPU/shellcode/exp.py
|
exp.py
|
py
| 520 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "multiprocessing.context.log_level",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.context",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "multiprocessing.context.arch",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.context",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "multiprocessing.context.bits",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.context",
"line_number": 11,
"usage_type": "name"
}
] |
37197760033
|
from datetime import datetime
class Greeter:
    def __init__(self, name):
        self.name = name
    @staticmethod
    def day():
        return datetime.now().strftime('%A')
    @staticmethod
    def part_of_day(): # Determines the part of the day based on the current hour
        current_hour = datetime.now().hour
        if current_hour < 12:
            part_of_day = "утра"
        elif 12 <= current_hour < 17:
            part_of_day = 'дня'
        else:
            part_of_day = 'вечера'
        return part_of_day
    def greet(self, store): # Prints the greeting using all the computed parts
        print(f'Здраствуйте, меня зовут {self.name}, и добро пожаловать '
              f'в {store}!')
        print(f'Желаем вам приятного {self.day()} {self.part_of_day()}')
        print('Дарим вам купон на скидку 20%!')
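# Usage sketch (hypothetical name and store, just to exercise the class):
# Greeter('Анна').greet('Stepik Store')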
|
alecksandr-slavin/git_work
|
stepick_v1/new.py
|
new.py
|
py
| 980 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
}
] |
4886937902
|
import numpy as np
import torch
def ious(box, boxes, isMin = False):#define the IoU function
    box_area = (box[3] - box[1]) * (box[4] - box[2])#area of the box with the highest confidence
    area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 4] - boxes[:, 2])#areas of all the other boxes
    xx1 = torch.max(box[1], boxes[:, 1])#x of the intersection's top-left corner; the others are analogous
    yy1 = torch.max(box[2], boxes[:, 2])
    xx2 = torch.min(box[3], boxes[:, 3])
    yy2 = torch.min(box[4], boxes[:, 4])
    # w = torch.max(0, xx2 - xx1)
    # h = torch.max(0, yy2 - yy1)#the max could also be taken this way
    w = torch.clamp(xx2 - xx1, min=0)#clamp to keep only positive widths
    h = torch.clamp(yy2 - yy1, min=0)
    inter = w * h#intersection area
    # ovr1 = inter/torch.min(box_area, area)
    ovr2 = inter/ (box_area + area - inter)#intersection area / union area
    # ovr = torch.max(ovr2,ovr1)
    # if isMin:#chooses between intersection/union and intersection/min-area (handles a large box containing a small one)
    #
    #     ovr = inter / torch.min(box_area, area)
    # else:
    #     ovr = inter / (box_area + area - inter)
    return ovr2
def nms(boxes, thresh=0.5, isMin = True):#define the nms function with 3 parameters: boxes, confidence threshold, whether to use min-area IoU
    if boxes.shape[0] == 0:#if there are no boxes, return an empty tensor so the code does not crash
        return torch.Tensor([])
    _boxes = boxes[(-boxes[:, 0]).argsort()]#sort the boxes by confidence, highest first
    r_boxes = []#empty list to collect the boxes that pass
    while _boxes.shape[0] > 1:#loop while boxes remain
        a_box = _boxes[0]#take the first box (highest confidence) as the target for IoU against the others
        b_boxes = _boxes[1:]#take all the remaining boxes
        r_boxes.append(a_box)#add the first box to the list
        # print(iou(a_box, b_boxes))
        index = torch.where(ious(a_box, b_boxes,isMin) < thresh)#keep the boxes whose IoU with the target is below the threshold and return their indices
        _boxes = b_boxes[index]#index into b_boxes and overwrite _boxes with the survivors
    if _boxes.shape[0] > 0:#check whether one last box is left
        r_boxes.append(_boxes[0])#append the last box: it must be a different object
    return torch.stack(r_boxes)
if __name__ == '__main__':
# a = np.array([1,1,11,11])
# bs = np.array([[1,1,10,10],[11,11,20,20]])
# print(iou(a,bs))
bs = torch.tensor([[1, 1, 10, 10, 40,8], [1, 1, 9, 9, 10,9], [9, 8, 13, 20, 15,3], [6, 11, 18, 17, 13,2]])
# print(bs[:,3].argsort())
print(nms(bs))
|
RockingHorse-L/yolov3
|
YOLOV3/tool1.py
|
tool1.py
|
py
| 2,668 |
python
|
zh
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "torch.max",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.where",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 61,
"usage_type": "call"
}
] |
26053687100
|
from itertools import permutations
vowels = ["а"]
consonants = ["б", "т", "с"]
result = set()
for i in permutations("аббатиса"):
correct = True
for symbol_index in range(0, len(i) - 1):
if (i[symbol_index] in vowels and i[symbol_index + 1] in vowels) or \
(i[symbol_index] in consonants and i[symbol_index + 1] in consonants):
correct = False
break
if correct:
result.add(i)
print(len(result))
|
Woolfer0097/UGE_IT
|
8 task/235.py
|
235.py
|
py
| 495 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.permutations",
"line_number": 7,
"usage_type": "call"
}
] |
17430789952
|
#!/usr/bin/python
# https://www.udemy.com/course/complete-python-developer-zero-to-mastery/
# 246. Hacker News Project
# https://www.synerzip.com/blog/web-scraping-introduction-applications-and-best-practices/
# https://www.crummy.com/software/BeautifulSoup/
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# https://scrapy.org/
# https://developer.mozilla.org/en-US/docs/Learn/CSS/Building_blocks/Selectors
# https://www.w3schools.com/css/css_selectors.asp
# https://www.w3schools.com/cssref/css_selectors.asp
# https://docs.python.org/3/library/pprint.html
# pip install beautifulsoup4
# pip install requests
from bs4 import BeautifulSoup
import requests
import pprint
RES = 'https://news.ycombinator.com/news'
POINTS = 200
res = requests.get(RES)
soup = BeautifulSoup(res.text, 'html.parser')
links = soup.select('.storylink')
subtext = soup.select('.subtext')
for i in range(2,4):
res = requests.get(RES+'?p='+str(i))
soup = BeautifulSoup(res.text, 'html.parser')
links += soup.select('.storylink')
subtext += soup.select('.subtext')
def sort_stories_by_votes(hnlist):
return sorted(hnlist, key=lambda k:k['points'], reverse=True)
def create_custom_hn(links, subtext):
hn = []
for idx, item in enumerate(links):
        title = item.getText()
        href = item.get('href', None)
        vote = subtext[idx].select('.score')
if len(vote):
points=int(vote[0].getText().replace(' points', ''))
if points >= POINTS:
hn.append({'title':title, 'link':href, 'points':points})
return sort_stories_by_votes(hn)
hacker_news= create_custom_hn(links, subtext)
pprint.pprint(hacker_news)
|
olexandrch/UdemyCompletePythonDeveloper
|
Sec.18 246 Hacker News Project.py
|
Sec.18 246 Hacker News Project.py
|
py
| 1,683 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 56,
"usage_type": "call"
}
] |
37431499468
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Dlink DIAGNOSTIC.PHP command execution
referer: https://www.exploit-db.com/exploits/24956
author: Lucifer
description: Some D-Link Routers are vulnerable to OS Command injection in the web interface.
On DIR-645 versions prior 1.03 authentication isn't needed to exploit it. On version 1.03 authentication is needed in order to trigger the vulnerability,
which has been fixed definitely on version 1.04. Other D-Link products, like DIR-300 rev B and DIR-600, are also affected by this vulnerability.
Not every device includes wget which we need for deploying our payload. On such devices you could use the cmd generic payload and try to start telnetd or execute other commands.
Since it is a blind OS command injection vulnerability, there is no output for the executed command when using the cmd generic payload. A ping command against a controlled system could be used for testing purposes.
This module has been tested successfully on DIR-645 prior to 1.03, where authentication isn't needed in order to exploit the vulnerability.
'''
import sys
import json
import requests
class router_dlink_command_exec_BaseVerify():
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
}
post_data = {
"act":"ping",
"dst":"www.baidu.com"
}
payload = "/diagnostic.php"
vulnurl = self.url + payload
try:
req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
if r"<report>OK" in req.text:
return "[+]存在Dlink DIAGNOSTIC.PHP命令执行漏洞...(高危)\tpayload: "+vulnurl+"\npost: "+json.dumps(post_data, indent=4)
else:
return "[-]no vuln"
except:
return "[-] ======>连接超时"
if __name__ == "__main__":
testVuln = router_dlink_command_exec_BaseVerify(sys.argv[1])
testVuln.run()
|
iceyhexman/onlinetools
|
scanner/plugins/hardware/router/router_dlink_command_exec.py
|
router_dlink_command_exec.py
|
py
| 2,122 |
python
|
en
|
code
| 1,626 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 43,
"usage_type": "attribute"
}
] |
42095434916
|
from selenium import webdriver # selenium driver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium_stealth import stealth # Helps keep websites from detecting that we are a bot
from shutil import which
def iniciar_webdriver(headless=True):# Starts the Chrome webdriver and returns it
    options = Options()
    if headless:
        options.add_argument("--headless") # run chromedriver without opening the window
    options.add_argument("--window-size=1000,1000") # set the window height and width
    options.add_argument("--start-maximized") # maximize the window
    options.add_argument("--disable-dev-shm-usage") # Important on Heroku | uses a temporary directory for anonymous shared-memory files
    options.add_argument("--disable-blink-features=AutomationControlled") # so navigator.webdriver reports false
    options.add_argument("--log-level=3") # keep the terminal output quiet
    lista = [
        "enable-automation", # hides "Chrome is being controlled by automated test software"
        "enable-logging", # hides DevTools messages
    ]
    options.add_experimental_option("excludeSwitches", lista)
    s = Service(which("chromedriver"))
    driver = webdriver.Chrome(service=s, options=options) # pass in the Options argument
stealth(
driver,
languages=["es-ES", "es"],
vendor="Google Inc.",
platform="Win32",
webgl_vendor="Intel Inc.",
renderer="Intel Iris OpenGL Engine",
fix_hairline=True,)
return driver
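# Usage sketch (assumes chromedriver is available on PATH):
# driver = iniciar_webdriver(headless=True)
# driver.get("https://example.com")
# driver.quit()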
|
Jonnathan1093/Telegram-Chatbot
|
ChatHeroku/iniciar_Webdriver.py
|
iniciar_Webdriver.py
|
py
| 1,666 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "shutil.which",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "selenium_stealth.stealth",
"line_number": 23,
"usage_type": "call"
}
] |
29250448134
|
from __future__ import annotations
import os
import unittest
from collections import defaultdict, namedtuple
from math import ceil
from typing import Any, Iterator
import numpy as np
from rtree.index import Index, Property, RT_TPRTree
class Cartesian(
namedtuple(
"Cartesian",
("id", "time", "x", "y", "x_vel", "y_vel", "update_time", "out_of_bounds"),
)
):
__slots__ = ()
def getX(self, t: float) -> float:
return self.x + self.x_vel * (t - self.time)
def getY(self, t: float) -> float:
return self.y + self.y_vel * (t - self.time)
def getXY(self, t: float) -> tuple[float, float]:
return self.getX(t), self.getY(t)
def get_coordinates(
self, t_now: float | None = None
) -> tuple[
tuple[float, float, float, float],
tuple[float, float, float, float],
float | tuple[float, float],
]:
return (
(self.x, self.y, self.x, self.y),
(self.x_vel, self.y_vel, self.x_vel, self.y_vel),
self.time if t_now is None else (self.time, t_now),
)
class QueryCartesian(
namedtuple("QueryCartesian", ("start_time", "end_time", "x", "y", "dx", "dy"))
):
__slots__ = ()
def get_coordinates(
self,
) -> tuple[
tuple[float, float, float, float],
tuple[float, float, float, float],
tuple[float, float],
]:
return (
(self.x - self.dx, self.y - self.dy, self.x + self.dx, self.y + self.dy),
(0, 0, 0, 0),
(self.start_time, self.end_time),
)
def data_generator(
dataset_size: int = 100,
simulation_length: int = 10,
max_update_interval: int = 20,
queries_per_time_step: int = 5,
min_query_extent: float = 0.05,
max_query_extent: float = 0.1,
horizon: int = 20,
min_query_interval: int = 2,
max_query_interval: int = 10,
agility: float = 0.01,
min_speed: float = 0.0025,
max_speed: float = 0.0166,
min_x: int = 0,
min_y: int = 0,
max_x: int = 1,
max_y: int = 1,
) -> Iterator[tuple[str, int, Any]]:
def create_object(
id_: float, time: float, x: float | None = None, y: float | None = None
) -> Cartesian:
# Create object with random or defined x, y and random velocity
if x is None:
x = np.random.uniform(min_x, max_x)
if y is None:
y = np.random.uniform(min_y, max_y)
speed = np.random.uniform(min_speed, max_speed)
angle = np.random.uniform(-np.pi, np.pi)
x_vel, y_vel = speed * np.cos(angle), speed * np.sin(angle)
# Set update time for when out of bounds, or max interval
for dt in range(1, max_update_interval + 1):
if not (0 < x + x_vel * dt < max_x and 0 < y + y_vel * dt < max_y):
out_of_bounds = True
update_time = time + dt
break
else:
out_of_bounds = False
update_time = time + max_update_interval
return Cartesian(id_, time, x, y, x_vel, y_vel, update_time, out_of_bounds)
objects = list()
objects_to_update = defaultdict(set)
for id_ in range(dataset_size):
object_ = create_object(id_, 0)
objects.append(object_)
objects_to_update[object_.update_time].add(object_)
yield "INSERT", 0, object_
for t_now in range(1, simulation_length):
need_to_update = ceil(dataset_size * agility)
updated_ids = set()
while need_to_update > 0 or objects_to_update[t_now]:
kill = False
if objects_to_update[t_now]:
object_ = objects_to_update[t_now].pop()
if object_ not in objects:
continue
kill = object_.out_of_bounds
else:
id_ = np.random.randint(0, dataset_size)
while id_ in updated_ids:
id_ = np.random.randint(0, dataset_size)
object_ = objects[id_]
updated_ids.add(object_.id)
need_to_update -= 1
yield "DELETE", t_now, object_
if kill:
x = y = None
else:
x, y = object_.getXY(t_now)
object_ = create_object(object_.id, t_now, x, y)
objects[object_.id] = object_
objects_to_update[object_.update_time].add(object_)
yield "INSERT", t_now, object_
for _ in range(queries_per_time_step):
x = np.random.uniform(min_x, max_x)
y = np.random.uniform(min_y, max_y)
dx = np.random.uniform(min_query_extent, max_query_extent)
dy = np.random.uniform(min_query_extent, max_query_extent)
dt = np.random.randint(min_query_interval, max_query_interval + 1)
t = np.random.randint(t_now, t_now + horizon - dt)
yield "QUERY", t_now, QueryCartesian(t, t + dt, x, y, dx, dy)
def intersects(
x1: float, y1: float, x2: float, y2: float, x: float, y: float, dx: float, dy: float
) -> bool:
# Checks if line from x1, y1 to x2, y2 intersects with rectangle with
# bottom left at x-dx, y-dy and top right at x+dx, y+dy.
# Implementation of https://stackoverflow.com/a/293052
# Check if line points not both more/less than max/min for each axis
if (
(x1 > x + dx and x2 > x + dx)
or (x1 < x - dx and x2 < x - dx)
or (y1 > y + dy and y2 > y + dy)
or (y1 < y - dy and y2 < y - dy)
):
return False
# Check on which side (+ve, -ve) of the line the rectangle corners are,
# returning True if any corner is on a different side.
calcs = (
(y2 - y1) * rect_x + (x1 - x2) * rect_y + (x2 * y1 - x1 * y2)
for rect_x, rect_y in (
(x - dx, y - dy),
(x + dx, y - dy),
(x - dx, y + dy),
(x + dx, y + dy),
)
)
sign = np.sign(next(calcs)) # First corner (bottom left)
return any(np.sign(calc) != sign for calc in calcs) # Check remaining 3
class TPRTests(unittest.TestCase):
def test_tpr(self) -> None:
# TODO : this freezes forever on some windows cloud builds
if os.name == "nt":
return
# Cartesians list for brute force
objects = dict()
tpr_tree = Index(properties=Property(type=RT_TPRTree))
for operation, t_now, object_ in data_generator():
if operation == "INSERT":
tpr_tree.insert(object_.id, object_.get_coordinates())
objects[object_.id] = object_
elif operation == "DELETE":
tpr_tree.delete(object_.id, object_.get_coordinates(t_now))
del objects[object_.id]
elif operation == "QUERY":
tree_intersect = set(tpr_tree.intersection(object_.get_coordinates()))
# Brute intersect
brute_intersect = set()
for tree_object in objects.values():
x_low, y_low = tree_object.getXY(object_.start_time)
x_high, y_high = tree_object.getXY(object_.end_time)
if intersects(
x_low,
y_low,
x_high,
y_high, # Line
object_.x,
object_.y,
object_.dx,
object_.dy,
): # Rect
brute_intersect.add(tree_object.id)
# Tree should match brute force approach
assert tree_intersect == brute_intersect
|
Toblerity/rtree
|
tests/test_tpr.py
|
test_tpr.py
|
py
| 7,681 |
python
|
en
|
code
| 573 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterator",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "numpy.sign",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "os.name",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "rtree.index.Index",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "rtree.index.Property",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "rtree.index.RT_TPRTree",
"line_number": 196,
"usage_type": "name"
}
] |
15200494736
|
#!/usr/bin/env python
from numpy import array
from math import sqrt
from pyspark import SparkContext
# from pyspark.mllib.clustering import KMeans, KMeansModel
from pyspark.mllib.clustering import KMeans
sc = SparkContext(appName="Kmeans Pyspark")
# Load and parse the data
data = sc.textFile("hdfs://localhost:9000/features/w2v/value_only")
parsedData = data.map(lambda line: array([float(x) for x in line.strip(' ').split(' ')]))
# Build the model (cluster the data)
clusters = KMeans.train(parsedData, 2, maxIterations=10, initializationMode="random")
# Evaluate clustering by computing Within Set Sum of Squared Errors
def error(point):
center = clusters.centers[clusters.predict(point)]
return sqrt(sum([x**2 for x in (point - center)]))
WSSSE = parsedData.map(lambda point: error(point)).reduce(lambda x, y: x + y)
print("Within Set Sum of Squared Error = " + str(WSSSE))
# Save and load model
clusters.save(sc, "hdfs://localhost:9000/kmeans/model")
# sameModel = KMeansModel.load(sc, "target/org/apache/spark/PythonKMeansExample/KMeansModel")
|
sindongboy/topinion
|
python/lda.py
|
lda.py
|
py
| 1,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyspark.SparkContext",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyspark.mllib.clustering.KMeans.train",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyspark.mllib.clustering.KMeans",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 26,
"usage_type": "call"
}
] |
31344968208
|
from tkinter import *
import tkinter, threading
from tkinter import messagebox as tmsg
from tkinter import ttk
import random
import datetime
import imageio
import time
from PIL import Image, ImageTk
import smtplib as s
import config
root1 = Tk()
root1.geometry("1208x830")
root1.wm_iconbitmap("Artboard 1.ico")
root1.minsize(1208, 830)
root1.maxsize(1208, 830)
root1.title("RYOCO TOURS© 2021")
video_name = "video1.mp4" #This is your video file path
video = imageio.get_reader(video_name)
def dr1():
time.sleep(2)
root1.destroy()
def login_window():
root2 =Toplevel()
# global dr1, root3
root2.geometry("1202x802")
root2.minsize(1202, 802)
root2.maxsize(1202, 802)
root2.wm_iconbitmap("Artboard 1.ico")
root2.title("RYOCO SIGNIN")
load = Image.open("LOGIN1.png")
render = ImageTk.PhotoImage(load)
jpg = Label(root2, image=render)
jpg.place(x=0, y=0)
#def statusr1():
# for i in range(2):
# statusvar.set("Busy.")
# statusbar.update()
#
# time.sleep(0.2)
# statusvar.set("Busy..")
# statusbar.update()
#
# time.sleep(0.2)
# statusvar.set("Busy...")
# statusbar.update()
#
# time.sleep(0.2)
# statusvar.set("Busy....")
# statusbar.update()
#
# time.sleep(0.2)
# x = 0
# for i in range(101):
# statusvar.set(f"LOADING {x}%")
# statusbar.update()
# time.sleep(0.0000001)
# x += 1
#
# statusvar.set("READY TO USE")
# statusbar.update()
# time.sleep(0.5)
# statusvar.set("PROCEED ENTERING YOUR DATA\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tRYOCO TOURS© 2021")
def register():
with open("ryoco_credentials.txt", 'r') as l:
m = l.read()
l.close()
        if reg_name_entry.get() == "" or reg_cnic_entry.get() == '' or reg_contact_entry.get() == "" or reg_email_entry.get() == "" or reg_password_entry.get() == '' or reg_username_entry.get() == '':  # compare the entry values, not the widget objects
tmsg.showerror('ENTRY INVALID', 'PLEASE ENTER COMPLETE DETAILS!', parent=root2)
elif len(reg_cnic_entry.get()) != 13:
tmsg.showerror('ENTRY INVALID', 'CNIC NOT VALID!', parent=root2)
elif len(reg_contact_entry.get()) != 11:
tmsg.showerror('ENTRY INVALID', 'CONTACT NOT VALID!', parent=root2)
# elif reg_username_entry in m:
# tmsg.showerror('ENTRY INVALID', 'USERNAME ALREADY REGISTERED!', parent=root2)
else:
a = tmsg.showinfo("Registeration Successful", "Your information has been registered!", parent=root2)
print(f'''
======================================================================================
First Name: {reg_name_entry.get()}
CNIC No : \t {reg_cnic_entry.get()}
Contact: \t {reg_contact_entry.get()}
E-mail: \t {reg_email_entry.get()}
\nCONFIDENTIAL DETAILS
username:\t {reg_username_entry.get()}
Password: {reg_password_entry.get()}
''')
with open(f"{reg_username_entry.get()}_Record.txt", "a") as f:
try:
f.write(f'''
=========================================================================
First Name: {reg_name_entry.get()}
CNIC No : \t {reg_cnic_entry.get()}
Contact: \t {reg_contact_entry.get()}
E-mail: \t {reg_email_entry.get()}
\nCONFIDENTIAL DETAILS
username:\t {reg_username_entry.get()}
Password: {reg_password_entry.get()}
''')
f.close()
except:
pass
with open("ryoco_credentials.txt", 'a') as j:
j.write(f"{reg_username_entry.get()}:{reg_password_entry.get()}\n")
j.close()
def login():
with open("ryoco_credentials.txt", 'r') as f:
e = f.read().splitlines()
f.close()
usernames = []
passwords = []
for items in e:
e1 = items.split(":")[0]
e2 = items.split(":")[1]
usernames.append(e1)
passwords.append(e2)
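# NOTE: username and password are validated independently below, so any
# registered password is accepted with any registered username. A stricter
# (hypothetical) check would pair them, e.g.:
# if f"{username_entry.get()}:{password_entry.get()}" not in e:
#     tmsg.showerror("ERROR", "INVALID CREDENTIALS", parent=root2)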
if username_entry.get() not in usernames:
tmsg.showerror("ERROR", "INVALID USERNAME", parent=root2)
elif password_entry.get() not in passwords:
tmsg.showerror("ERROR", "INVALID PASSWORD", parent=root2)
else:
global render1
c = tmsg.showinfo("LOGIN SUCCESSFUL", "LOGIN SUCCESSFUL",parent=root2)
root3 = Toplevel()
# root3.wm_iconbitmap("Artboard 1.ico")
root3.geometry("1202x802")
root3.minsize(1202, 802)
root3.maxsize(1202, 802)
root3.title("RYOCO BOOKING")
def logout():
root3.destroy()
login_window()
def LOCAL():
tmsg.showinfo('DETAILS', '''
\nTOUR-I:\tKumrat Valley (3-Days)
\t\t\tvia-Jahaz Band, Katora Lake
\nTOUR-II:\tFairy Meadows (4-Days)
\t\t\tvia-Naran, Hunza
\nTOUR-III:\tHunza (5-Days)
\t\t\tvia-Swat, Khunjerab''', parent=root3)
def INTERNATIONAL():
tmsg.showinfo('DETAILS', '''
TOUR-I: (14-DAYS)
Russia, Turkey, Dubai (290,000 PKR Per Person)
TOUR-II: (5-DAYS)
Mauritius Tour Package (225,000 PKR Per Person)
TOUR-III: (05-Days)
TURKEY (45,000 PKR Per Person)''', parent=root3)
def dr3():
root3.destroy()
root2.destroy()
load1 = Image.open("booking1.png")
render1 = ImageTk.PhotoImage(load1)
jpg1 = Label(root3, image=render1)
jpg1.place(x=0, y=0)
# def statusr2():
# x = 0
# for i in range(101):
# statusvar.set(f"LOADING {x}%")
# statusbar.update()
# time.sleep(0.000001)
# x += 1
# statusvar.set("READY TO USE")
# statusbar.update()
# time.sleep(0.5)
# statusvar.set(f"\t\t{date_of_booking}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tRYOCO TOURS© 2021")
def BOOK():
global listbox, login_window
# ============================================ MAILING ===========================================================
with open(f'{reg_username_entry.get()}_trip.txt', "a") as b: # .get() needed here, otherwise the StringVar object's repr ends up in the filename
try:
b.write(f'''
CLIENT: {reg_name_entry.get()}
CNIC: {reg_cnic_entry.get()}
USERNAME: {reg_username_entry.get()}
PASSWORD: {reg_password_entry.get()}
TOUR: {tour.get()}
TYPE: {type.get()}
DEPARTURE: {departure.get()}
Time: {time_of_booking}, {date_of_booking}''')
except:
b.write(f'''
TOUR: {tour.get()}
TYPE: {type.get()}
DEPARTURE: {departure.get()}
Time: {time_of_booking}, {date_of_booking}''')
b.close()
with open(f'Ryoco_records.txt', "w") as e:
try:
e.write(f'''
Dear MR/MRS.{reg_name_entry.get()}, CNIC NO. {reg_cnic_entry.get()}
Your username is "{reg_username_entry.get()}" and password is "{reg_password_entry.get()}"
Contact: {reg_contact_entry.get()}
---- Trip Details:
TOUR: {tour.get()}
TYPE: {type.get()}
DEPARTURE: {departure.get()}
Time: {time_of_booking}, {date_of_booking}
YOUR TOUR HAS BEEN BOOKED, OUR ADMINISTRATION WILL CONTACT YOU SHORTLY.
THANK YOU FOR TRUSTING US! :)
Regards,
RYOCO Tours(PVT), Limited.
''')
except:
print(username_entry.get())
e.write(f'''
Dear MR/MRS.{username_entry.get()}
Your username is "{username_entry.get()}" and password is "{password_entry.get()}"
---- Trip Details:
TOUR: {tour.get()}
TYPE: {type.get()}
DEPARTURE: {departure.get()}
Time: {time_of_booking}, {date_of_booking}
YOUR TOUR HAS BEEN BOOKED, OUR ADMINISTRATION WILL CONTACT YOU SHORTLY.
THANK YOU FOR TRUSTING US! :)
Regards,
RYOCO Tours(PVT), Limited.
''')
e.close()
if mail.get() == True:
print('EMAIL SELECTED')
with open('mailingaddress.txt', 'w') as t:
t.write(f'{reg_email_entry.get()}')
t.close()
x = open('mailingaddress.txt', 'r')
mailing = x.read()
x.close()
p = open('Ryoco_records.txt', 'r')
contents = p.read()
p.close()
try:
mailfrom = '[email protected]'
mailto = mailing
subject = 'RYOCO: BOOKING DETAILS'
message = contents
msg = 'Subject: {}\n\n{}'.format(subject, message)
username = config.EMAIL_ADDRESS
password = config.PASSWORD
server = s.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(username, password)
server.sendmail(mailfrom, mailto, msg)
server.quit()
print("Successfully mailed")
except:
print('Failed to mail details')
else:
print('EMAIL NOT SELECTED')
listbox = Text(root3, height=2, width=15, bg='#6F4624', fg="white", font=("Montserrat", 18))
listbox.place(x=700, y=600)
listbox.delete("1.0", END)
if departure.get() == 'ISLAMABAD' and tour.get() == 'TOUR-I' and type.get() == 'local':
TotalCost1 = int('11499')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'LAHORE' and tour.get() == 'TOUR-I' and type.get() == 'local':
TotalCost1 = int('14999')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'KARACHI' and tour.get() == 'TOUR-I' and type.get() == 'local':
TotalCost1 = int('19999')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
# =======================================TOUR-II=============================================================
elif departure.get() == 'ISLAMABAD' and tour.get() == 'TOUR-II' and type.get() == 'local':
TotalCost1 = int('14999')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'LAHORE' and tour.get() == 'TOUR-II' and type.get() == 'local':
TotalCost1 = int('15499')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'KARACHI' and tour.get() == 'TOUR-II' and type.get() == 'local':
TotalCost1 = int('21999')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
# ========================================= TOUR-III =======================================================
elif departure.get() == 'ISLAMABAD' and tour.get() == 'TOUR-III' and type.get() == 'local': # was a duplicate 'LAHORE' branch that made the one below unreachable; ISLAMABAD assumed from the ISLAMABAD/LAHORE/KARACHI pattern of the other tours
TotalCost1 = int('19499')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'LAHORE' and tour.get() == 'TOUR-III' and type.get() == 'local':
TotalCost1 = int('19999')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'KARACHI' and tour.get() == 'TOUR-III' and type.get() == 'local':
TotalCost1 = int('24999')
if class1.get() == 'business':
TotalCost1 += 5000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
# =========================================== INTERNATIONAL ==============================================
# ==========================================TOUR-I========================================================
elif departure.get() == 'ISLAMABAD' and tour.get() == 'TOUR-I' and type.get() == 'international':
TotalCost1 = int('299999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'LAHORE' and tour.get() == 'TOUR-I' and type.get() == 'international':
TotalCost1 = int('294999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'KARACHI' and tour.get() == 'TOUR-I' and type.get() == 'international':
TotalCost1 = int('289999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
# ==========================================TOUR-II========================================================
elif departure.get() == 'ISLAMABAD' and tour.get() == 'TOUR-II' and type.get() == 'international':
TotalCost1 = int('234999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'LAHORE' and tour.get() == 'TOUR-II' and type.get() == 'international':
TotalCost1 = int('229999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'KARACHI' and tour.get() == 'TOUR-II' and type.get() == 'international':
TotalCost1 = int('224999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
# ==========================================TOUR-III========================================================
elif departure.get() == 'ISLAMABAD' and tour.get() == 'TOUR-III' and type.get() == 'international':
TotalCost1 = int('54999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'LAHORE' and tour.get() == 'TOUR-III' and type.get() == 'international':
TotalCost1 = int('49999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
elif departure.get() == 'KARACHI' and tour.get() == 'TOUR-III' and type.get() == 'international':
TotalCost1 = int('44999')
if class1.get() == 'business':
TotalCost1 += 50000
Tax = int((TotalCost1 / 100) * 5)
SubTotal1 = int(Tax + TotalCost1)
print(f"Cost for trip: Rs{TotalCost1}(tax exclusive)")
print(f'Total Cost: Rs{SubTotal1}')
listbox.insert(END, f'Rs.{TotalCost1}/-\n+{Tax}/-(tax)')
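# A table-driven sketch could replace the elif chain above (hypothetical
# refactor; fares and surcharges are taken from the literals in the branches):
# FARES = {('ISLAMABAD', 'TOUR-I', 'local'): 11499,
#          ('LAHORE', 'TOUR-I', 'local'): 14999,
#          ('KARACHI', 'TOUR-I', 'local'): 19999}  # ...and so on
# base = FARES.get((departure.get(), tour.get(), type.get()))
# if base is not None:
#     if class1.get() == 'business':
#         base += 5000 if type.get() == 'local' else 50000
#     Tax = int((base / 100) * 5)
#     listbox.insert(END, f'Rs.{base}/-\n+{Tax}/-(tax)')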
local_button = Radiobutton(root3, text="LOCAL", font=("Montserrat Black", 18, "italic"), variable=type,
value="local", bg="#FFAA00", fg="#623D28")
local_button.place(x=30, y=580)
international_button = Radiobutton(root3, text="INTERNATIONAL", font=("Montserrat Black", 18, "italic"),
variable=type, value="international", bg="#FFAA00", fg="#623D28")
international_button.place(x=165, y=580)
type_entry = ttk.Combobox(root3, width=50, textvariable=tour)
type_entry['value'] = ('TOUR-I', 'TOUR-II', 'TOUR-III')
type_entry.place(x=65, y=630)
departure_entry = ttk.Combobox(root3, width=30, textvariable=departure)
departure_entry['value'] = ('ISLAMABAD', 'LAHORE', 'KARACHI')
departure_entry.place(x=186, y=665)
economy_button = Radiobutton(root3, text="ECONOMY", font=("Montserrat Black", 18, "italic"), variable=class1,
value="economy", bg="#FFAA00", fg="#623D28")
economy_button.place(x=435, y=602)
business_button = Radiobutton(root3, text="BUSINESS", font=("Montserrat Black", 18, "italic"),
variable=class1,
value="business", bg="#FFAA00", fg="#623D28")
business_button.place(x=435, y=650)
book_button = Button(root3, text="BOOK RIDE", relief=FLAT, bg="#FCD34B", fg="#7F7F7F",
font=("TrashHand", 40),
command=BOOK)
book_button.place(x=985, y=490)
listbox = Text(root3, height=2, width=15, bg='#6F4624', fg="white", font=("Montserrat", 18))
listbox.place(x=700, y=600)
# listbox8.insert(END, time_of_booking)
local_button = Button(root3, text="LOCAL ", width=35, relief=FLAT, bg="#EA7415", fg="#6F4624",
font=("Bahnschrift Light", 25), command=LOCAL)
local_button.place(x=15, y=724)
international_button = Button(root3, text="INTERNATIONAL", width=33, relief=FLAT, bg="#EA7415", fg="#6F4624",
font=("Bahnschrift Light", 25), command=INTERNATIONAL)
international_button.place(x=585, y=724)
mailcheck = Checkbutton(root3, text="EMAIL BOOKING INFO", variable=mail, bg="#FFAA00", font=("Consolas", 13))
mailcheck.place(x=980, y=665)
logout_button = Button(root3, width=15, text="🔙 LOGOUT ", bg="#EA7415", font=("Montserrat SemiBold", 14),relief=FLAT,command=logout)
logout_button.place(x=950, y=36)
# statusvar = StringVar()
# statusvar.set("\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tRYOCO TOURS© 2021")
# statusbar = Label(root3, textvariable=statusvar, bg="#FEEEC6", relief=GROOVE, anchor=W)
# statusbar.pack(side=BOTTOM, fill=X)
# statusr2()
def admin():
root4 = Toplevel()
root4.geometry("1202x802")
root4.minsize(1202, 802)
root4.maxsize(1202, 802)
root4.title("ADMIN CONSOLE")
root2.destroy()
root4.mainloop()
# statusvar = StringVar()
# statusvar.set("READY")
# statusbar = Label(root2, textvariable=statusvar, bg="#FEEEC6", relief=GROOVE, anchor=W)
# statusbar.pack(side=BOTTOM, fill=X)
# ==========================================REGISTERIES========================================
reg_name_entry = StringVar()
reg_cnic_entry = StringVar()
reg_contact_entry = StringVar()
reg_email_entry = StringVar()
reg_username_entry = StringVar()
reg_password_entry = StringVar()
username_entry = StringVar()
password_entry = StringVar()
CNIC = StringVar()
CNIC.set("0")
# ================================================LOGIN=================================================================
username_entryx = Entry(root2, width=23, bg="#F8C43A", textvariable=username_entry, font=("Gill Sans MT", 12))
username_entryx.place(x=985, y=667)
password_entryx = Entry(root2, width=23, bg="#F8C43A", textvariable=password_entry, font=("Gill Sans MT", 12), show="*")
password_entryx.place(x=985, y=720)
# =============================================REGISERATION=============================================================
reg_username_entryx = Entry(root2, width=25, bg="#F8C43A", textvariable=reg_username_entry, font=("Gill Sans MT", 12))
reg_username_entryx.place(x=275, y=669)
reg_password_entryx = Entry(root2, width=25, bg="#F8C43A", textvariable=reg_password_entry, font=("Gill Sans MT", 12))
reg_password_entryx.place(x=275, y=723)
reg_name_entryx = Entry(root2, width=25, bg="#F8C43A", textvariable=reg_name_entry, font=("Gill Sans MT", 12))
reg_name_entryx.place(x=275, y=408)
reg_cnic_entryx = Entry(root2, width=36, bg="#F8C43A", textvariable=reg_cnic_entry, font=("Montserrat SemiBold", 12))
reg_cnic_entryx.place(x=80, y=500)
reg_contact_entryx = Entry(root2, width=18, bg="#F8C43A", textvariable=reg_contact_entry, font=("Montserrat SemiBold", 12))
reg_contact_entryx.place(x=275, y=560)
reg_email_entryx = Entry(root2, width=22, bg="#F8C43A", textvariable = reg_email_entry, font=("Consolas", 12))
reg_email_entryx.place(x=275, y=615)
check = Radiobutton(root2, variable=CNIC, bg="#F7BA11", value="cnic")
check.place(x=75, y=452)
check1 = Radiobutton(root2, variable=CNIC, bg="#F7C235", value="passport")
check1.place(x=235, y=452)
login_button = Button(root2, width=16, text="LOGIN ", bg="#C6633C", font=("Montserrat SemiBold", 12), relief=FLAT,
command=login)
login_button.place(x=880, y=756)
reg_button = Button(root2, width=16, text="REGISTER ", bg="#C6633C", font=("Montserrat SemiBold", 12), relief=FLAT,
command=register)
reg_button.place(x=164, y=756)
# admin_button = Button(root2, width=16, text="ADMIN ", bg="#C6633C", font=("Montserrat SemiBold", 12), relief=FLAT,
# command=admin)
# admin_button.place(x=980, y=40)
# statusr1()
# ================================================ BOOKING TAB =========================================================
mail = IntVar()
local = StringVar()
international = StringVar()
class1 = StringVar()
type = StringVar()
tour = StringVar()
departure = StringVar()
type.set('0')
class1.set('0')
root2.mainloop()
try:
dr1()
except:
pass
def stream(label):
for image in video.iter_data():
frame_image = ImageTk.PhotoImage(Image.fromarray(image))
label.config(image=frame_image)
label.image = frame_image
#====================================== win-0 =====================================
time_of_booking = (time.strftime("%I:%M:%S %p"))
date_of_booking = (time.strftime("%d/%m/%Y"))
def begin():
Button(text="BEGIN EXPLORING!", font=("TrashHand 29"), height=2, width=20, bg="#FFBB56", command=login_window, relief=FLAT).place(x=45, y=680)
my_label = Label(root1)
my_label.pack()
thread = threading.Thread(target=stream, args=(my_label,))
thread.daemon = True
thread.start()
begin()
root1.mainloop()
|
zuwanish/Tour-Management-System
|
MAIN PROJECT GUI BASED.py
|
MAIN PROJECT GUI BASED.py
|
py
| 30,680 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "imageio.get_reader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "config.EMAIL_ADDRESS",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "config.PASSWORD",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "smtplib.SMTP",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 522,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 526,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 679,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 679,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 692,
"usage_type": "call"
}
] |
21433461529
|
import csv
import logging
import os
logger = logging.getLogger("app_logger")
# Channels with extra long videos messing up the stats and to be deleted,
# or other channels you just don't want to include.
CHANNELS_NOT_TO_IMPORT = ["4k SCREENSAVERS", "Nature Relaxation Films", "4K Relaxation Channel"]
# Some extra long videos can be deleted but in that case these channels won't be touched.
IMPORTANT_CHANNELS = ["Lex Fridman", "Andrew Huberman"]
# Insert the data from the CSV file into the database.
def insert_data_into_database(conn, c):
csv_file = os.path.abspath(os.getenv("CSV_FILE"))
with open(csv_file, "r", encoding="UTF-8-sig") as csvfile:
reader = csv.DictReader(csvfile, delimiter=";")
inserted_videos = 0
inserted_channels = 0
inserted_actions = 0
skipped_videos = 0
logger.info("Inserting data into the database...")
# Loop over each row in the CSV file.
for row in reader:
if row["Channel"] in CHANNELS_NOT_TO_IMPORT:
skipped_videos += 1
continue
c.execute("SELECT id FROM activity WHERE action = ? AND timestamp = ?",
(row["Action"], row["Timestamp"]))
activity = c.fetchone()
# If the action is already in the activity table, this is not the first time the script is run.
if activity:
continue
c.execute("SELECT id, url FROM channel WHERE url = ?", (row["Channel URL"],))
channel = c.fetchone()
# If the channel doesn't exist, insert it into the channels table.
if not channel:
channel_name = row["Channel"].strip()
c.execute("""INSERT INTO channel (name, url)
VALUES (?, ?)""", (channel_name , row["Channel URL"],))
channel_id = c.lastrowid
inserted_channels += 1
else:
channel_id = channel[0]
if "Title" in row and "URL" in row:
c.execute("SELECT id FROM video WHERE title = ? AND url = ?", (row["Title"], row["URL"]))
video = c.fetchone()
# If the video doesn't exist, insert it into the videos table.
if not video:
c.execute("""INSERT INTO video (title, url, channel_id)
VALUES (?, ?, ?)""", (row.get("Title", None), row["URL"], channel_id))
video_id = c.lastrowid
inserted_videos += 1
else:
video_id = video[0]
c.execute("""INSERT INTO activity (action, timestamp, video_id, channel_id)
VALUES (?, ?, ?, ?)""", (row["Action"], row["Timestamp"], video_id, channel_id))
inserted_actions += 1
conn.commit()
logger.info(f"Actions inserted: {inserted_actions}")
logger.info(f"Unique videos inserted: {inserted_videos}")
logger.info(f"Unique channels inserted: {inserted_channels}")
if skipped_videos > 0:
logger.info(f"{skipped_videos} videos skipped because channels were defined as excluded")
# Many streams are 10+ hours which mess up the watch time stats. 4 hours seemed to be a good average for me.
def delete_extra_long_videos(conn, c, max_length=4):
max_length_seconds = max_length * 3600 # threshold in seconds; videos longer than this are deleted
excluded_condition = ''
if IMPORTANT_CHANNELS:
excluded_condition = f"AND channel.name NOT IN ({','.join('?' for _ in IMPORTANT_CHANNELS)})"
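# With the two default IMPORTANT_CHANNELS this renders roughly as
# "... AND channel.name NOT IN (?,?) ..." and is executed below with
# [max_length_seconds, 'Lex Fridman', 'Andrew Huberman'] as the parameters.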
query = f"""
DELETE FROM video
WHERE id IN (
SELECT video.id
FROM video
JOIN channel ON video.channel_id = channel.id
WHERE video.length > ?
{excluded_condition}
ORDER BY video.length DESC)
"""
c.execute(query, [max_length_seconds] + IMPORTANT_CHANNELS)
conn.commit()
if c.rowcount > 0:
logger.info(f"Deleted {c.rowcount} extra long videos.")
# Delete channels that have no videos, then remove activities, videos and stats that reference deleted rows.
def delete_orphans(conn, c):
c.execute("DELETE FROM channel WHERE id NOT IN (SELECT DISTINCT channel_id FROM video)")
rowcount = c.rowcount
c.execute("DELETE FROM activity WHERE channel_id NOT IN (SELECT id FROM channel)")
c.execute("DELETE FROM video WHERE channel_id NOT IN (SELECT id FROM channel)")
c.execute("DELETE FROM video_stat WHERE video_id NOT IN (SELECT id FROM video)")
c.execute("DELETE FROM channel_stat WHERE channel_id NOT IN (SELECT id FROM channel)")
conn.commit()
if rowcount > 0:
logger.info(f"Deleted {c.rowcount} empty channels.")
|
arilaakso/viewinginsights
|
import_data_into_db.py
|
import_data_into_db.py
|
py
| 4,855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 23,
"usage_type": "call"
}
] |
73815165306
|
from random import random
from time import time
from cachier import cachier
@cachier(next_time=True)
def _test_int_pickling(int_1, int_2):
"""Add the two given ints."""
return int_1 + int_2
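# cachier memoizes results to disk; per its docs, next_time=True makes a call
# that hits a stale entry (when stale_after is set) return the stale value
# immediately and trigger recalculation for the next call.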
def _test_int_pickling_compare(int_1, int_2):
"""Add the two given ints."""
return int_1 + int_2
def test_pickle_speed():
"""Test speeds"""
print("Comparing speeds of decorated vs non-decorated functions...")
num_of_vals = 1000
times = []
for i in range(1, num_of_vals):
tic = time()
_test_int_pickling_compare(i, i + 1)
toc = time()
times.append(toc - tic)
print(' - Non-decorated average = {:.8f}'.format(
sum(times) / len(times)))
_test_int_pickling.clear_cache()
times = []
for i in range(1, num_of_vals):
tic = time()
_test_int_pickling(i, i + 1)
toc = time()
times.append(toc - tic)
print(' - Decorated average = {:.8f}'.format(
sum(times) / len(times)))
@cachier()
def _test_single_file_speed(int_1, int_2):
"""Add the two given ints."""
# something that takes some memory
return [random() for _ in range(1000000)]
@cachier(separate_files=True)
def _test_separate_files_speed(int_1, int_2):
"""Add the two given ints."""
# something that takes some memory
return [random() for _ in range(1000000)]
def test_separate_files_vs_single_file():
_test_separate_files_speed.clear_cache()
_test_single_file_speed.clear_cache()
start_time = time()
for i in range(3):
for j in range(10):
_test_separate_files_speed(j, 2)
print(f'separate files time: {time() - start_time}')
start_time = time()
for i in range(3):
for j in range(10):
_test_single_file_speed(j, 2)
print(f'single file time: {time() - start_time}')
if __name__ == '__main__':
test_pickle_speed()
|
python-cachier/cachier
|
tests/speed_eval.py
|
speed_eval.py
|
py
| 1,903 |
python
|
en
|
code
| 470 |
github-code
|
6
|
[
{
"api_name": "cachier.cachier",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cachier.cachier",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cachier.cachier",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 68,
"usage_type": "call"
}
] |
40260766080
|
import gvar as gv
import corrfitter as cf
import numpy as np
import collections
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import MultipleLocator
matplotlib.use('Agg')
plt.rc("font",**{"size":18})
import datetime
import os
import pickle
import copy
#from plotting import *
import lsqfit
lsqfit.nonlinear_fit.set(fitter='gsl_multifit',alg='subspace2D',scaler='more',solver='cholesky')
####################################
maxiter=5000
#######################################################################################################
def read_setup(setup):
#Reads in setups, and strips out currents, parents and daughters, as well as which is which
daughters = []
currents = []
parents = []
for element in setup:
lab = element.split('-')
daughters.append(lab[0])
currents.append(lab[1])
parents.append(lab[2])
return(daughters,currents,parents)
######################################################################################################
def strip_list(l): #Concatenates the elements of list l into a single string (used to build filenames)
stripped = ''
for element in l:
stripped = '{0}{1}'.format(stripped,element)
return(stripped)
######################################################################################################
def make_params(Fit,FitMasses,FitTwists,FitTs,daughters,currents,parents):
#Removes things we do not want to fit, specified by FitMasses, FitTwists, FitTs; assumes parents have varying mass and daughters varying twist
j = 0
for i in range(len(Fit['masses'])):
if i not in FitMasses:
del Fit['masses'][i-j]
for element in set(parents):
del Fit['tmaxes{0}'.format(element)][i-j]
j += 1
j = 0
for i in range(len(Fit['twists'])):
if i not in FitTwists:
del Fit['twists'][i-j]
for element in set(daughters):
del Fit['tmaxes{0}'.format(element)][i-j]
j += 1
j = 0
for i in range(len(Fit['Ts'])):
if i not in FitTs:
del Fit['Ts'][i-j]
j += 1
return()
#######################################################################################################
def make_data(filename,binsize):
# Reads in filename.gpl, checks all keys have same configuration numbers, returns averaged data
print('Reading data, binsize = ', binsize)
dset = cf.read_dataset(filename,binsize=binsize)
sizes = []
for key in dset:
#print(key,np.shape(dset[key]))
sizes.append(np.shape(dset[key]))
if len(set(sizes)) != 1:
print('Not all elements of the gpl file are the same size')
for key in dset:
print(key,np.shape(dset[key]))
return(gv.dataset.avg_data(dset))
######################################################################################################
def make_pdata(filename,models,binsize):
# Reads in filename.gpl, checks all keys have same configuration numbers, returns averaged data
print('Reading processed data, binsize = ', binsize)
dset = cf.read_dataset(filename,binsize=binsize)
sizes = []
for key in dset:
#print(key,np.shape(dset[key]))
sizes.append(np.shape(dset[key]))
if len(set(sizes)) != 1:
print('Not all elements of the gpl file are the same size')
for key in dset:
print(key,np.shape(dset[key]))
return(cf.process_dataset(dset, models))
#######################################################################################################
def effective_mass_calc(tag,correlator,tp):
#finds the effective mass of a two point correlator
M_effs = []
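# For a periodic two-point correlator C(t) ~ A*(exp(-M*t) + exp(-M*(tp-t))),
# (C(t-2) + C(t+2)) / (2*C(t)) = cosh(2M), so M = arccosh(...)/2 below.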
for t in range(2,len(correlator)-2):
thing = (correlator[t-2] + correlator[t+2])/(2*correlator[t])
if thing >= 1:
M_effs.append(gv.arccosh(thing)/2)
#M_effs now holds only the positive masses; we take a rolling average of 4 and keep the value where it changes least
rav = []
for i in range(len(M_effs)-4):
rav.append((M_effs[i] + M_effs[i+1] + M_effs[i+2] + M_effs[i+3])/4)
M_eff = rav[0]
diff = abs((rav[1] - rav[0]).mean)
for i in range(1,len(rav)-1):
if abs((rav[i+1]-rav[i]).mean) < diff:
diff = abs((rav[i+1]-rav[i]).mean)
M_eff = (rav[i] + rav[i+1])/2
return(M_eff)
######################################################################################################
def effective_amplitude_calc(tag,correlator,tp,M_eff,Fit,corr):
#finds the effective amplitude of a two point correlator, given its effective mass
tmin = Fit['tmin{0}'.format(corr)]
A_effs = []
if len(correlator) == tp:
tmin = 0
for t in range(tmin,tmin+len(correlator)):
numerator = correlator[t-tmin]
if numerator >= 0:
A_effs.append( gv.sqrt(numerator/(gv.exp(-M_eff*t)+gv.exp(-M_eff*(tp-t)))))
rav = []
for i in range(len(A_effs)-4):
rav.append((A_effs[i] + A_effs[i+1] + A_effs[i+2] + A_effs[i+3])/4)
A_eff = rav[0]
diff = abs((rav[1] - rav[0]).mean)
for i in range(1,len(rav)-1):
if abs((rav[i+1]-rav[i]).mean) < diff:
diff = abs((rav[i+1]-rav[i]).mean)
A_eff = (rav[i] + rav[i+1])/2
an = gv.gvar(Fit['an'])
if A_eff.sdev/A_eff.mean > 0.5:
print('Replaced A_eff for {0} {1} -> {2}'.format(tag,A_eff,an))
A_eff = an
return(A_eff)
########################################################################################
def effective_V_calc(corr,daughter,parent,correlator,dcorr,pcorr,Fit,mass,twist,pA_eff,dA_eff):
#finds the effective V_nn[0][0]
tp = Fit['tp']
T = Fit['Ts'][-1]
dtmin = Fit['tmin{0}'.format(daughter)]
ptmin = Fit['tmin{0}'.format(parent)]
Vtmin = Fit['{0}tmin'.format(corr)]
dcorr2 = []
pcorr2 = []
Vcorr2 = []
V_effs = []
#print(corr,daughter,parent,mass,twist)
if len(dcorr) == int(tp):
dcorr2 = dcorr
else:
for i in range(dtmin):
dcorr2.append(0)
dcorr2.extend(dcorr)
for i in range(int(tp/2)-len(dcorr2)+1):
dcorr2.append(0)
#print(dcorr2)
if len(pcorr) == int(tp):
pcorr2 = pcorr
else:
for i in range(ptmin):
pcorr2.append(0)
pcorr2.extend(pcorr)
for i in range(int(tp/2)-len(pcorr2)+1):
pcorr2.append(0)
#print(pcorr2)
if len(correlator) == int(tp):
Vcorr2 = correlator
else:
for i in range(Vtmin):
Vcorr2.append(0)
Vcorr2.extend(correlator)
for i in range(T-len(Vcorr2)+1):
Vcorr2.append(0)
#print(Vcorr2)
for t in range(T):
numerator = Vcorr2[t]*pA_eff*dA_eff
denominator = dcorr2[t]*pcorr2[T-t]
if numerator != 0 and denominator !=0:
V_effs.append(numerator/denominator)
rav = []
for i in range(len(V_effs)-4):
rav.append((V_effs[i] + V_effs[i+1] + V_effs[i+2] + V_effs[i+3])/4)
V_eff = rav[0]
diff = abs((rav[1] - rav[0]).mean)
for i in range(1,len(rav)-1):
if abs((rav[i+1]-rav[i]).mean) < diff:
diff = abs((rav[i+1]-rav[i]).mean)
if (rav[i] + rav[i+1]) > 0:
V_eff = (rav[i] + rav[i+1])/2
V = gv.gvar(Fit['{0}Vnn0'.format(corr)])
if abs((V_eff.mean-V).mean/(V_eff.mean-V).sdev) > 1:
print('Replaced V_eff for {0} m {1} tw {2}: {3} --> {4}'.format(corr,mass,twist,V_eff,V))
V_eff = V
return(V_eff)
#######################################################################################################
def SVD_diagnosis(Fit,models,corrs,svdfac,currents,SepMass):
binsize = Fit['binsize']
#Feed models and corrs (list of corrs in this SVD cut)
if list(set(corrs).intersection(currents)) ==[]:
filename = 'SVD/{0}{1}{2}{3}{4}{5}{6}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(corrs),binsize,SepMass)
else:
filename = 'SVD/{0}{1}{2}{3}{4}{5}{6}{7}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(corrs),strip_list(Fit['Ts']),binsize,SepMass)
#print(filename)
for corr in corrs:
if 'tmin{0}'.format(corr) in Fit:
filename += '{0}'.format(Fit['tmin{0}'.format(corr)])
for element in Fit['tmaxes{0}'.format(corr)]:
filename += '{0}'.format(element)
if '{0}tmin'.format(corr) in Fit:
filename += '{0}'.format(Fit['{0}tmin'.format(corr)])
#print(filename)
if os.path.isfile(filename) and os.path.getsize(filename) > 0:
pickle_off = open(filename,"rb")
svd = pickle.load(pickle_off)
print('Loaded SVD for {0} : {1:.2g} x {2} = {3:.2g}'.format(corrs,svd,svdfac,svd*svdfac))
pickle_off.close()
else:
print('Calculating SVD for {0}'.format(corrs))
s = gv.dataset.svd_diagnosis(cf.read_dataset('{0}{1}.gpl'.format(Fit['file_location'],Fit['filename']),binsize=binsize), models=models, nbstrap=20)
svd = s.svdcut
######## save plot ##########################
plt.figure()
x = s.val / s.val[-1]
ratio = s.bsval / s.val
idx = x > s.mincut
ratio = ratio[idx]
x = x[idx]
y = gv.mean(ratio)
yerr = gv.sdev(ratio)
plt.errorbar(x=x, y=y, yerr=yerr, fmt='+', color='b')
sig = (2. / len(s.val)) ** 0.5
plt.plot([x[0], x[-1]], [1. - sig, 1. - sig], 'k:')
plt.axhline(1,ls='--',color='k')
plt.axvline(s.svdcut,ls=':',color='g')
#plt.axvline(0.013,ls='--',color='g')
plt.xscale('log')
plt.savefig('svd_plots/{0}.pdf'.format(filename.split('/')[1]))
###############################################
pickle_on = open(filename,"wb")
print('Calculated SVD for {0} : {1:.2g} x {2} = {3:.2g}'.format(corrs,svd,svdfac,svd*svdfac))
pickle.dump(svd,pickle_on)
return(svd*svdfac)
#######################################################################################################
def make_models(Fit,FitCorrs,notwist0,non_oscillating,daughters,currents,parents,svdfac,Chained,allcorrs,links,parrlinks,SepMass,NoSVD=False):
#several forms [(A,B,C,D)],[(A,B),(C),(D)],[(A,B),[(C),(D)]]
#First make all models and then stick them into the correct chain
models = collections.OrderedDict()
tp = Fit['tp']
for corr in set(parents):
if corr in allcorrs:
models['{0}'.format(corr)] = []
for i,mass in enumerate(Fit['masses']):
tag = Fit['{0}-Tag'.format(corr)].format(mass)
models['{0}'.format(corr)].append(cf.Corr2(datatag=tag, tp=tp, tmin=Fit['tmin{0}'.format(corr)], tmax=Fit['tmaxes{0}'.format(corr)][i], a=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), b=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), dE=('dE:{0}'.format(tag), 'dE:o{0}'.format(tag)),s=(1,-1)))
for corr in set(daughters):
if corr in allcorrs:
models['{0}'.format(corr)] = []
for i,twist in enumerate(Fit['twists']):
tag = Fit['{0}-Tag'.format(corr)].format(twist)
if twist == '0' and corr in notwist0:
pass
elif twist == '0' and corr in non_oscillating:
models['{0}'.format(corr)].append(cf.Corr2(datatag=tag, tp=tp, tmin=Fit['tmin{0}'.format(corr)], tmax=Fit['tmaxes{0}'.format(corr)][i], a=('{0}:a'.format(tag)), b=('{0}:a'.format(tag)), dE=('dE:{0}'.format(tag))))
else:
models['{0}'.format(corr)].append(cf.Corr2(datatag=tag, tp=tp, tmin=Fit['tmin{0}'.format(corr)], tmax=Fit['tmaxes{0}'.format(corr)][i], a=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), b=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), dE=('dE:{0}'.format(tag), 'dE:o{0}'.format(tag)),s=(1,-1)))
for i,corr in enumerate(currents):
if corr in allcorrs:
models['{0}'.format(corr)] = []
for mass in Fit['masses']:
for twist in Fit['twists']:
for T in Fit['Ts']:
tag = Fit['threePtTag{0}'.format(corr)].format(T,Fit['m_s'],mass,Fit['m_l'],twist)
ptag = Fit['{0}-Tag'.format(parents[i])].format(mass)
dtag = Fit['{0}-Tag'.format(daughters[i])].format(twist)
if twist == '0' and corr in notwist0:
pass
elif twist == '0' and daughters[i] in non_oscillating:
models['{0}'.format(corr)].append(cf.Corr3(datatag=tag, T=T, tmin=Fit['{0}tmin'.format(corr)], a=('{0}:a'.format(dtag)), dEa=('dE:{0}'.format(dtag)), b=('{0}:a'.format(ptag), 'o{0}:a'.format(ptag)), dEb=('dE:{0}'.format(ptag), 'dE:o{0}'.format(ptag)), sb=(1,-1), Vnn='{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist), Vno='{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)))
else:
models['{0}'.format(corr)].append(cf.Corr3(datatag=tag, T=T, tmin=Fit['{0}tmin'.format(corr)], a=('{0}:a'.format(dtag), 'o{0}:a'.format(dtag)), dEa=('dE:{0}'.format(dtag), 'dE:o{0}'.format(dtag)), sa=(1,-1), b=('{0}:a'.format(ptag), 'o{0}:a'.format(ptag)), dEb=('dE:{0}'.format(ptag), 'dE:o{0}'.format(ptag)), sb=(1,-1), Vnn='{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist), Vno='{0}Vno_m{1}_tw{2}'.format(corr,mass,twist),Von='{0}Von_m{1}_tw{2}'.format(corr,mass,twist),Voo='{0}Voo_m{1}_tw{2}'.format(corr,mass,twist)))
#Now we make these models into our chain, calculating an svd cut for each. We make them in two halves so we can sandwich a marginalisation term between them later if we like
if Chained:
finalmodelsA = []
finalmodelsB = []
intermediate = []
for key in links:
link = [] #link is models in link
for corr in links[key]:
link.extend(models['{0}'.format(corr)])
svd = SVD_diagnosis(Fit,link,links[key],svdfac,currents,SepMass)
finalmodelsA.append({'svdcut':svd})
finalmodelsA.append(tuple(link))
for key in parrlinks:
link = [] #link is models in link
for corr in parrlinks[key]:
link.extend(models['{0}'.format(corr)])
svd = SVD_diagnosis(Fit,link,parrlinks[key],svdfac,currents,SepMass)
intermediate.append({'svdcut':svd})
intermediate.append(tuple(link))
finalmodelsB.append(intermediate)
return(finalmodelsA,finalmodelsB)
else:
finalmodels = []
for corr in allcorrs:
finalmodels.extend(models['{0}'.format(corr)])
if NoSVD == False:
svd = SVD_diagnosis(Fit,finalmodels,allcorrs,svdfac,currents,SepMass)
return(tuple(finalmodels),svd)
else:
return(tuple(finalmodels))
#######################################################################################################
def elements_in_FitCorrs(a):
# reads [A,[B,C],[[D,E],F]] and interprets which elements will be chained and how. Returns an alphabetical list of all elements, the links in the chain and the links in the parallel chain
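# e.g. a = ['A', ['B', 'C'], [['D', 'E'], 'F']] gives allcorrs = ['A','B','C','D','E','F'],
# links = {0: ['A'], 1: ['B', 'C']} and parrlinks = {0: ['D', 'E'], 1: ['F']}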
allcorrs = []
links = collections.OrderedDict()
parrlinks = collections.OrderedDict()
for i in range(np.shape(a)[0]):
links[i] =[]
if len(np.shape(a[i])) == 0: #deals with one corr in chain
#print(a[i],i,'fit alone in chain')
allcorrs.append(a[i])
links[i].append(a[i])
elif len(np.shape(a[i][0])) == 0 : #deals with multiple elements in chain
for j in range(len(a[i])):
#print(a[i][j],i,'fit together in chain')
allcorrs.append(a[i][j])
links[i].append(a[i][j])
else:
del links[i] #don't need this key if it is in parallel
for j in range(np.shape(a[i])[0]):
parrlinks[j] = []
if len(np.shape(a[i][j])) == 0: #deals with one corr in parr chain
allcorrs.append(a[i][j])
parrlinks[j].append(a[i][j])
else: # deals with multiple elements in parallel chain
for k in range(len(a[i][j])):
allcorrs.append(a[i][j][k])
parrlinks[j].append(a[i][j][k])
return(sorted(allcorrs),links,parrlinks)
######################################################################################################
def make_prior(Fit,N,allcorrs,currents,daughters,parents,loosener,data,notwist0,non_oscillating):
No = N # number of oscillating exponentials
prior = gv.BufferDict()
tw_corr = True
otw_corr = True
if len(daughters) != 0 and '0' in Fit['twists'] and tw_corr:
for corr in set(daughters).intersection(allcorrs):
prior['d2_{0}'.format(corr)] = gv.gvar('0.0(1.0)')
prior['c2_{0}'.format(corr)] = gv.gvar('0.0(1.0)')
print('Daughter twists correlated')
if len(daughters) != 0 and '0' in Fit['twists'] and otw_corr:
for corr in set(daughters).intersection(allcorrs):
prior['oc2_{0}'.format(corr)] = gv.gvar('0.0(1.0)')
print('Daughter oscillating twists correlated')
tp = Fit['tp']
En = '{0}({1})'.format(0.5*Fit['a'],0.25*Fit['a']*loosener) #Lambda_QCD-sized splitting of 0.5*a, with an error of half its value
an = '{0}({1})'.format(gv.gvar(Fit['an']).mean,gv.gvar(Fit['an']).sdev*loosener)
aon = '{0}({1})'.format(gv.gvar(Fit['aon']).mean,gv.gvar(Fit['aon']).sdev*loosener)
for corr in allcorrs:
if corr in parents:
for mass in Fit['masses']:
tag = Fit['{0}-Tag'.format(corr)].format(mass)
M_eff = effective_mass_calc(tag,data[tag],tp)
a_eff = effective_amplitude_calc(tag,data[tag],tp,M_eff,Fit,corr)
# Parent
prior['log({0}:a)'.format(tag)] = gv.log(gv.gvar(N * [an]))
prior['log(dE:{0})'.format(tag)] = gv.log(gv.gvar(N * [En]))
prior['log({0}:a)'.format(tag)][0] = gv.log(gv.gvar(a_eff.mean,loosener*Fit['loosener']*a_eff.mean))
prior['log(dE:{0})'.format(tag)][0] = gv.log(gv.gvar(M_eff.mean,loosener*Fit['Mloosener']*M_eff.mean))
# Parent -- oscillating part
prior['log(o{0}:a)'.format(tag)] = gv.log(gv.gvar(No * [an]))
prior['log(dE:o{0})'.format(tag)] = gv.log(gv.gvar(No * [En]))
prior['log(dE:o{0})'.format(tag)][0] = gv.log(gv.gvar((M_eff+gv.gvar(En)*(4/5)).mean,loosener*Fit['oMloosener']*((M_eff+gv.gvar(En)*(4/5)).mean)))
if corr in daughters:
for twist in Fit['twists']:
if twist =='0' and corr in notwist0:
pass
else:
ap2 = 3*(np.pi*float(twist)/Fit['L'])**2
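# twist theta is applied in all three spatial directions, so each component is
# a*p_i = pi*theta/L and ap2 = |a*p|^2 = 3*(pi*theta/L)^2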
#print(twist,ap2)
tag0 = Fit['{0}-Tag'.format(corr)].format('0')
M_eff = np.sqrt(effective_mass_calc(tag0,data[tag0],tp)**2 + ap2) #from dispersion relation
tag = Fit['{0}-Tag'.format(corr)].format(twist)
a_eff = effective_amplitude_calc(tag,data[tag],tp,M_eff,Fit,corr)
# Daughter
prior['log({0}:a)'.format(tag)] = gv.log(gv.gvar(N * [an]))
prior['log(dE:{0})'.format(tag)] = gv.log(gv.gvar(N * [En]))
#prior['log(dE:{0})'.format(tag)][1] = gv.log(gv.gvar(gv.gvar(En).mean,0.01*gv.gvar(En).mean))
if twist !='0' and '0' in Fit['twists'] and 'log(dE:{0})'.format(tag0) in prior and tw_corr:
prior['log(dE:{0})'.format(tag)][0] = gv.log(gv.sqrt(prior['dE:{0}'.format(tag0)][0]**2 + ap2) * (1 + prior['c2_{0}'.format(corr)]*ap2/(np.pi)**2) )
prior['log({0}:a)'.format(tag)][0] = gv.log((prior['{0}:a'.format(tag0)][0]/gv.sqrt(gv.sqrt(1 + ap2/(prior['dE:{0}'.format(tag0)][0])**2))) * (1 + prior['d2_{0}'.format(corr)]*ap2/(np.pi)**2) )
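# The (1 + c2*ap2/pi^2) and (1 + d2*ap2/pi^2) factors let the fit accommodate
# momentum-dependent deviations from the continuum dispersion relation, with
# c2, d2 constrained by the 0.0(1.0) priors set above.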
else:
prior['log(dE:{0})'.format(tag)][0] = gv.log(gv.gvar(M_eff.mean,loosener*Fit['Mloosener']*M_eff.mean))
prior['log({0}:a)'.format(tag)][0] = gv.log(gv.gvar(a_eff.mean,loosener*Fit['loosener']*a_eff.mean))
# Daughter -- oscillating part
if twist =='0' and corr in non_oscillating:
pass
else:
newaon = aon
if twist == '0':
newaon = '{0}({1})'.format(gv.gvar(aon).mean/4,gv.gvar(aon).mean/2) #very small in the case of twist 0
prior['log(o{0}:a)'.format(tag)] = gv.log(gv.gvar(No * [newaon]))
prior['log(dE:o{0})'.format(tag)] = gv.log(gv.gvar(No * [En]))
if twist !='0' and '0' in Fit['twists'] and 'log(dE:o{0})'.format(tag0) in prior and otw_corr:
prior['log(dE:o{0})'.format(tag)][0] = gv.log(gv.sqrt(prior['dE:o{0}'.format(tag0)][0]**2 + ap2) * (1 + prior['oc2_{0}'.format(corr)]*ap2/(np.pi)**2) )
#prior['log(o{0}:a)'.format(tag)][0] = gv.log((prior['o{0}:a'.format(tag0)][0]/gv.sqrt(1 + ap2/(prior['dE:o{0}'.format(tag0)][0])**2)) * (1 + prior['od2']*ap2/(np.pi)**2) )
prior['log(o{0}:a)'.format(tag)][0] = gv.log(gv.gvar(gv.gvar(newaon).mean,loosener*Fit['oloosener']*gv.gvar(newaon).mean))
else:
prior['log(dE:o{0})'.format(tag)][0] = gv.log(gv.gvar((M_eff+gv.gvar(En)/2).mean,loosener*Fit['oMloosener']*((M_eff+gv.gvar(En)/2).mean))) # kaon splitting
#prior['log(dE:o{0})'.format(tag)][0] = gv.log(prior['dE:{0}'.format(tag)][0] + gv.gvar(En))
prior['log(o{0}:a)'.format(tag)][0] = gv.log(gv.gvar(gv.gvar(newaon).mean,loosener*Fit['oloosener']*gv.gvar(newaon).mean))
if corr in currents:
for mass in Fit['masses']:
for twist in Fit['twists']:
if twist =='0' and corr in notwist0:
pass
else:
daughter=daughters[currents.index(corr)]
parent=parents[currents.index(corr)]
dcorr = data[Fit['{0}-Tag'.format(daughter)].format(twist)]
pcorr = data[Fit['{0}-Tag'.format(parent)].format(mass)]
correlator = data[Fit['threePtTag{0}'.format(corr)].format(Fit['Ts'][-1],Fit['m_s'],mass,Fit['m_l'],twist)]
ptag = Fit['{0}-Tag'.format(parent)].format(mass)
pM_eff = effective_mass_calc(ptag,data[ptag],tp)
pa_eff = effective_amplitude_calc(ptag,data[ptag],tp,pM_eff,Fit,parent)
dtag = Fit['{0}-Tag'.format(daughter)].format(twist)
dM_eff = effective_mass_calc(dtag,data[dtag],tp)
da_eff = effective_amplitude_calc(dtag,data[dtag],tp,dM_eff,Fit,daughter)
V_eff = effective_V_calc(corr,daughter,parent,correlator,dcorr,pcorr,Fit,mass,twist,da_eff,pa_eff)
if V_eff.mean != gv.gvar(Fit['{0}Vnn0'.format(corr)]).mean:
Vnn0 = '{0}({1})'.format(V_eff.mean,loosener*V_eff.mean*Fit['Vloosener'])
else:
Vnn0 = '{0}({1})'.format(V_eff.mean,loosener*V_eff.sdev)
Vn = '{0}({1})'.format(gv.gvar(Fit['{0}Vn'.format(corr)]).mean,loosener*gv.gvar(Fit['{0}Vn'.format(corr)]).sdev)
V0 = '{0}({1})'.format(gv.gvar(Fit['{0}V0'.format(corr)]).mean,loosener*gv.gvar(Fit['{0}V0'.format(corr)]).sdev)
if twist =='0' and corr in notwist0:
pass
elif twist =='0' and daughters[currents.index(corr)] in non_oscillating :
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [N * [Vn]])
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(Vnn0)
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [No* [Vn]])
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
else:
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [N * [Vn]])
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(Vnn0)
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [No * [Vn]])
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
prior['{0}Voo_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(No * [No * [Vn]])
prior['{0}Voo_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
prior['{0}Von_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(No * [N * [Vn]])
prior['{0}Von_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
# for key in prior:
# if key[0] == corr:
# for i in range(1,N):
# for j in range(1,N):
# prior[key][i][j] = gv.gvar('0.0(5)')
return(prior)
######################################################################################################
def get_p0(Fit,fittype,Nexp,allcorrs,prior,FitCorrs):
# We want to cover several scenarios, in this order of preference:
# 1) This exact fit has been done before, modulo priors, svds, t0s etc
# 2) Same but a different type of fit, e.g. marginalised
# 3) This fit has been done before with Nexp+1
# 4) This fit has been done before with Nexp-1
# 5a) Some elements have been fitted to Nexp before
# 5b) Some elements of the fit have been fitted in other combinations before
filename1 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp)
filename2 = 'p0/{0}{1}{2}{3}{4}{5}{6}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),Nexp)
filename3 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp+1)
filename4 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp-1)
filename5a = 'p0/{0}{1}{2}'.format(Fit['conf'],Fit['filename'],Nexp)
filename5b = 'p0/{0}{1}'.format(Fit['conf'],Fit['filename'])
#case 1
if os.path.isfile(filename1):
p0 = gv.load(filename1)
print('Loaded p0 from exact fit')
#case 2
elif os.path.isfile(filename2):
p0 = gv.load(filename2)
print('Loaded p0 from exact fit of different type')
#case 3
elif os.path.isfile(filename3):
p0 = gv.load(filename3)
print('Loaded p0 from exact fit Nexp+1')
#case 4
elif os.path.isfile(filename4):
p0 = gv.load(filename4)
print('Loaded p0 from exact fit Nexp-1')
#case 5
elif os.path.isfile(filename5b):
p0 = gv.load(filename5b)
print('Loaded global p0')
if os.path.isfile(filename5a):
pnexp = gv.load(filename5a)
for key in pnexp:
if key in prior:
if key not in p0:
print('Error: {0} in global Nexp but not in global fit'.format(key))
p0[key] = pnexp[key] # overwrite (or add) the global entry with the Nexp-specific one
print('Loaded {0} p0 from global Nexp'.format(key))
else:
p0 = None
return(p0)
######################################################################################################
def update_p0(p,finalp,Fit,fittype,Nexp,allcorrs,FitCorrs,Q,marg=False):
# We want to take in several scenarios in this order
# 1) This exact fit has been done before, modulo priors, svds t0s etc
# 2) Same but different type of fit, eg marginalised
# 3) Global Nexp
# 4) Global
# 5) if marg is True, we only save filename1, since Nexp = nmarg there and is not comparable with the other fit types
filename1 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp)
filename2 = 'p0/{0}{1}{2}{3}{4}{5}{6}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),Nexp)
filename3 = 'p0/{0}{1}{2}'.format(Fit['conf'],Fit['filename'],Nexp)
filename4 = 'p0/{0}{1}'.format(Fit['conf'],Fit['filename'])
#case 1
for element in ['c2','d2','oc2']:
for corr in allcorrs:
if '{0}_{1}'.format(element,corr) in p:
del p['{0}_{1}'.format(element,corr)]
for element in ['c2','d2','oc2']:
for corr in allcorrs:
if '{0}_{1}'.format(element,corr) in finalp:
del finalp['{0}_{1}'.format(element,corr)]
gv.dump(p,filename1)
if marg == False:
#case 2
gv.dump(finalp,filename2)
#case 3
if os.path.isfile(filename3) and Q > 0.05:
p0 = gv.load(filename3) #load existing global Nexp
for key in finalp: # key in this output
p0[key] = finalp[key] #update existing and add new
gv.dump(p0,filename3)
else:
gv.dump(finalp,filename3)
if os.path.isfile(filename4) and Q > 0.05:
p0 = gv.load(filename4) # load existing, could be any length
for key in finalp: # key in new
if key in p0: # key already present: only overwrite entries no longer than Nexp
if len(np.shape(p0[key])) == 1 and len(p0[key]) <= Nexp:
#print('shape p0[key]',np.shape(p0[key]),key)
del p0[key]
p0[key] = finalp[key]
print('Updated global p0 {0}'.format(key))
elif np.shape(p0[key])[0] <= Nexp:
#print('shape p0[key]',np.shape(p0[key]),key)
del p0[key]
p0[key] = finalp[key]
print('Updated global p0 {0}'.format(key))
else:
p0[key] = finalp[key]
print('Added new element to global p0 {0}'.format(key))
gv.dump(p0,filename4)
else:
gv.dump(finalp,filename4)
return()
######################################################################################################
def save_fit(fit,Fit,allcorrs,fittype,Nexp,SvdFactor,PriorLoosener,currents,smallsave):
filename = 'Fits/{0}{1}{2}{3}{4}{5}{6}_Nexp{7}_sfac{8}_pfac{9}_Q{10:.2f}_chi{11:.3f}_sm{12}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),fittype,Nexp,SvdFactor,PriorLoosener,fit.Q,fit.chi2/fit.dof,smallsave)
for corr in allcorrs:
if corr in currents:
filename += '_{0}tmin{1}'.format(corr,Fit['{0}tmin'.format(corr)])
savedict = gv.BufferDict()
if smallsave:
for key in fit.p:
if key[0] == 'l':
key2 = key.split('(')[1].split(')')[0]
if key2.split(':')[0] =='dE' and key2.split(':')[1][0] != 'o':
savedict[key] = [fit.p[key][0]] #was palt
elif key[2] =='n' and key[3] == 'n':
savedict[key] = [[fit.p[key][0][0]]] #was palt
elif smallsave == False:
savedict = fit.p
print('Started gv.gdump to {1}, smallsave = {0}'.format(smallsave,'{0}.pickle'.format(filename)),datetime.datetime.now())
gv.gdump(savedict,'{0}.pickle'.format(filename))
print('Finished gv.gdump fit, starting save fit output',datetime.datetime.now())
f = open('{0}.txt'.format(filename),'w')
f.write(fit.format(pstyle='v'))
f.close()
print('Finished save fit output',datetime.datetime.now())
return()
######################################################################################################
def do_chained_fit(data,prior,Nexp,modelsA,modelsB,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,FitCorrs,save,smallsave,GBF):#if GBF = None doesn't pass GBF, else passed GBF
#do chained fit with no marginalisation Nexp = NMax
models = copy.deepcopy(modelsA)
if len(modelsB[0]) !=0:
models.extend(modelsB)
print('Models',models)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'chained',Nexp,allcorrs,prior,FitCorrs)
print(30 * '=','Chained-Unmarginalised','Nexp =',Nexp,'Date',datetime.datetime.now())
fit = fitter.chained_lsqfit(data=data, prior=prior, p0=p0, noise=noise,debug=True)
update_p0([f.pmean for f in fit.chained_fits.values()],fit.pmean,Fit,'chained',Nexp,allcorrs,FitCorrs,fit.Q) #fittype=chained, for marg,includeN
    if GBF is None:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return()
elif fit.logGBF - GBF < 1 and fit.logGBF - GBF > 0:
print('log(GBF) went up by less than 1: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
elif fit.logGBF - GBF < 0:
print('log(GBF) went down {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
else:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
print('log(GBF) went up {0:.2f}'.format(fit.logGBF - GBF))
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return(fit.logGBF)
######################################################################################################
def do_chained_marginalised_fit(data,prior,Nexp,modelsA,modelsB,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,FitCorrs,save,smallsave,GBF,Marginalised):#if GBF = None doesn't pass GBF, else passed GBF
    #do chained fit with marginalisation nterm = (Nexp,Nexp); Nmarg = Marginalised is used in the p0 bits
models = copy.deepcopy(modelsA)
if len(modelsB[0]) !=0:
models.append(dict(nterm=(Nexp,Nexp)))
models.extend(modelsB)
else:
        print('Marginalisation not applied as no parallelised models')
print('Models',models)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,allcorrs,prior,FitCorrs)
print(30 * '=','Chained-marginalised','Nexp =',Marginalised,'nterm = ({0},{0})'.format(Nexp),'Date',datetime.datetime.now())
fit = fitter.chained_lsqfit(data=data, prior=prior, p0=p0, noise=noise,debug=True)
update_p0([f.pmean for f in fit.chained_fits.values()],fit.pmean,Fit,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,allcorrs,FitCorrs,fit.Q,True) #fittype=chained, for marg,includeN
    if GBF is None:
print(fit)#.format(pstyle='m'))
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return()
elif fit.logGBF - GBF < 1 and fit.logGBF - GBF > 0:
print('log(GBF) went up by less than 1: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
elif fit.logGBF - GBF < 0:
print('log(GBF) went down {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
else:
print(fit)#.format(pstyle='m'))
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
print('log(GBF) went up {0:.2f}'.format(fit.logGBF - GBF))
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return(fit.logGBF)
######################################################################################################
def do_unchained_fit(data,prior,Nexp,models,svdcut,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,save,smallsave,GBF):#if GBF = None doesn't pass GBF, else passed GBF
    #do unchained fit with no marginalisation Nexp = NMax
print('Models',models)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'unchained',Nexp,allcorrs,prior,allcorrs) # FitCorrs = allcorrs
print(30 * '=','Unchained-Unmarginalised','Nexp =',Nexp,'Date',datetime.datetime.now())
fit = fitter.lsqfit(pdata=data, prior=prior, p0=p0, svdcut=svdcut, noise=noise,debug=True)
    update_p0(fit.pmean,fit.pmean,Fit,'unchained',Nexp,allcorrs,allcorrs,fit.Q) #fittype=unchained, for marg,includeN
    if GBF is None:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)#,Fit)
print_Z_V(fit.p,Fit,allcorrs)
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'unchained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return()
elif fit.logGBF - GBF < 1 and fit.logGBF - GBF > 0:
print('log(GBF) went up by less than 1: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
elif fit.logGBF - GBF < 0:
print('log(GBF) went down: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
else:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)#,Fit)
print_Z_V(fit.p,Fit,allcorrs)
print('log(GBF) went up more than 1: {0:.2f}'.format(fit.logGBF - GBF))
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'unchained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return(fit.logGBF)
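# Illustrative driver (not part of the original file; `data`, `priors`, `models`, `svdcut`
# and `Fit` are assumed to exist): the GBF return values above support a scan over Nexp
# that stops once log(GBF) improves by less than 1 between successive fits, e.g.
#
#   GBF = -np.inf  # any very small number makes the first fit count as an improvement
#   for Nexp in range(2, 8):
#       GBF_new = do_unchained_fit(data, priors[Nexp], Nexp, models, svdcut, Fit, False,
#                                  currents, allcorrs, 1.0, 1.0, True, False, GBF)
#       if GBF_new - GBF < 1:
#           break
#       GBF = GBF_new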
#######################################################################################################
def do_sep_mass_fit(data,prior,Nexp,models,svdcut,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,save,smallsave,GBF):
#if GBF = None doesn't pass GBF, else passed GBF
    #do separate-mass fit with no marginalisation Nexp = NMax
print('Models',models)
#print(data)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'sepmass',Nexp,allcorrs,prior,allcorrs) # FitCorrs = allcorrs
    print(30 * '=','Separate Mass Fit','Nexp =',Nexp,'Date',datetime.datetime.now())
fit = fitter.lsqfit(pdata=data, prior=prior, p0=p0, svdcut=svdcut, noise=noise,debug=True)
    update_p0(fit.pmean,fit.pmean,Fit,'sepmass',Nexp,allcorrs,allcorrs,fit.Q) #fittype=sepmass, for marg,includeN
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)#,Fit)
return(fit)
######################################################################################################
def combine_sep_mass_fits(result,Fit,priors,allcorrs,Nexp,SvdFactor,PriorLoosener,currents,save,smallsave):
prior = gv.BufferDict()
combined = []
for mass in Fit['masses']:
smallresult = gv.BufferDict()
fit = result[mass].p
for key in fit:
if key[0] == 'l':
key2 = key.split('(')[1].split(')')[0]
if key2.split(':')[0] =='dE':
smallresult[key] = [fit[key][0]]
elif key[2] =='n' and key[3] == 'n':
smallresult[key] = [[fit[key][0][0]]]
combined.append(smallresult)
prior = copy.deepcopy(priors[Fit['masses'][0]])
for mass in Fit['masses']:
for key in priors[mass]:
if key not in prior:
prior[key] = copy.deepcopy(priors[mass][key])
#print(combined)
final = lsqfit.wavg(combined)
#print(gv.evalcorr([final['SVnn_m0.433_tw0.8563'][0][0],final['SVnn_m0.683_tw0.8563'][0][0]]))
chi = 0
Q = 0
GBF = 0
for mass in Fit['masses']:
chi += (result[mass].chi2/result[mass].dof)/len(Fit['masses'])
Q += (result[mass].Q)/len(Fit['masses'])
GBF += result[mass].logGBF
print('Mean chi^2/dof = {0:.3f} Q = {1:.3f}, total logGBF {2:.1f}'.format(chi,Q,GBF))
print_results(final,prior)#,Fit)
print_Z_V(final,Fit,allcorrs)
if save:
save_combined_fit(final,Fit,allcorrs,'sep_mass',Nexp,SvdFactor,PriorLoosener,currents,smallsave,chi,Q)
return()
######################################################################################################################
def save_combined_fit(fit,Fit,allcorrs,fittype,Nexp,SvdFactor,PriorLoosener,currents,smallsave,chi,Q):
filename = 'Fits/{0}{1}{2}{3}{4}{5}{6}_Nexp{7}_sfac{8}_pfac{9}_Q{10:.2f}_chi{11:.3f}_sm{12}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),fittype,Nexp,SvdFactor,PriorLoosener,Q,chi,smallsave)
for corr in allcorrs:
if corr in currents:
filename += '_{0}tmin{1}'.format(corr,Fit['{0}tmin'.format(corr)])
savedict = gv.BufferDict()
if smallsave:
for key in fit:
if key[0] == 'l':
key2 = key.split('(')[1].split(')')[0]
if key2.split(':')[0] =='dE' and key2.split(':')[1][0] != 'o':
savedict[key] = [fit[key][0]]
elif key[2] =='n' and key[3] == 'n':
savedict[key] = [[fit[key][0][0]]]
elif smallsave == False:
        print('Error, can only do small save with sep masses')
#print(gv.evalcorr([savedict['SVnn_m0.433_tw0.8563'][0][0],savedict['SVnn_m0.683_tw0.8563'][0][0]]))
print('Started gv.gdump to {1}, smallsave = {0}'.format(smallsave,'{0}.pickle'.format(filename)),datetime.datetime.now())
gv.gdump(savedict,'{0}.pickle'.format(filename))
print('Finished gv.gdump fit',datetime.datetime.now())
return()
######################################################################################################
def print_p_p0(p,p0,prior):
print('{0:<30}{1:<20}{2:<40}{3:<20}'.format('key','p','p0','prior'))
for key in prior:
if len(np.shape(p[key])) ==1 :
for element in range(len(p[key])):
if element == 0:
print('{0:<30}{1:<20}{2:<40}{3:<20}'.format(key,p[key][element],p0[key][element],prior[key][element]))
else:
print('{0:>30}{1:<20}{2:<40}{3:<20}'.format('',p[key][element],p0[key][element],prior[key][element]))
return()
#####################################################################################################
def print_results(p,prior):#,Fit):
print(100*'-')
print('{0:<30}{1:<15}{2:<15}{3:<15}{4}'.format('key','p','p error','prior','prior error'))
print(100*'-')
print('Ground state energies')
print(100*'-')
for key in prior:
if key[0] == 'l':
key = key.split('(')[1].split(')')[0]
if key.split(':')[0] =='dE' and key.split(':')[1][0] != 'o':
print('{0:<30}{1:<15}{2:<15.3%}{3:<15}{4:.2%}'.format(key,p[key][0],p[key][0].sdev/p[key][0].mean,prior[key][0],prior[key][0].sdev/prior[key][0].mean))
#if '{0}'.format(key.split(':')[1]) == Fit['BG-Tag'].format(Fit['masses'][0]):
# print('split: ', p['dE:{0}'.format(Fit['BNG-Tag'].format(Fit['masses'][0]))][0]-p[key][0])
print('')
print('Oscillating ground state energies')
print(100*'-')
for key in prior:
if key[0] == 'l':
key = key.split('(')[1].split(')')[0]
if key.split(':')[0] =='dE' and key.split(':')[1][0] == 'o':
print('{0:<30}{1:<15}{2:<15.3%}{3:<15}{4:.2%}'.format(key,p[key][0],p[key][0].sdev/p[key][0].mean,prior[key][0],prior[key][0].sdev/prior[key][0].mean))
print('')
print('V_nn[0][0]')
print(100*'-')
for key in prior:
if key[1] != '2' and key[2] =='n' and key[3] == 'n':
print('{0:<30}{1:<15}{2:<15.3%}{3:<15}{4:.2%}'.format(key,p[key][0][0],p[key][0][0].sdev/p[key][0][0].mean,prior[key][0][0],prior[key][0][0].sdev/prior[key][0][0].mean))
print(100*'-')
return()
#####################################################################################################
def make_Z_V(m_h,m_s,M_parent,M_daughter,S,V):
Z_V = (m_h-m_s)/(M_parent-M_daughter) * S/V
return(Z_V)
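# Worked example (illustrative numbers only, not fit results): with gvar-valued masses
# and matrix elements the ratio propagates uncertainties automatically, e.g.
#
#   make_Z_V(0.8, 0.0234, gv.gvar('2.100(10)'), gv.gvar('0.6500(20)'),
#            gv.gvar('1.100(30)'), gv.gvar('1.050(30)'))
#
# returns a gvar for Z_V = (m_h - m_s)/(M_parent - M_daughter) * S/V.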
#####################################################################################################
# needs generalising
#####################################################################################################
def print_Z_V(p,Fit,allcorrs):
if 'S' in allcorrs and 'V' in allcorrs:
print(100*'-')
for mass in Fit['masses']:
M_parent = p['dE:{0}'.format(Fit['{0}-Tag'.format('BG')].format(mass))][0]
M_daughter = p['dE:{0}'.format(Fit['{0}-Tag'.format('KG')].format('0'))][0]
S = p['SVnn_m{0}_tw0'.format(mass)][0][0]
V = p['VVnn_m{0}_tw0'.format(mass)][0][0]
Z_V = make_Z_V(float(mass),float(Fit['m_s']),M_parent,M_daughter,S,V)
print("Mass = {0} Z_V = {1}".format(mass,Z_V))
print(100*'-')
return()
#####################################################################################################
|
WillParrott/New_bodiddley_fitter
|
functions.py
|
functions.py
|
py
| 47,341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "lsqfit.nonlinear_fit.set",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "lsqfit.nonlinear_fit",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "corrfitter.read_dataset",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "gvar.dataset.avg_data",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "gvar.dataset",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "corrfitter.read_dataset",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "corrfitter.process_dataset",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "gvar.arccosh",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "gvar.sqrt",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "gvar.exp",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "gvar.dataset.svd_diagnosis",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "gvar.dataset",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "corrfitter.read_dataset",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "gvar.mean",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "gvar.sdev",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "corrfitter.Corr2",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "corrfitter.Corr2",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "corrfitter.Corr2",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "corrfitter.Corr3",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "corrfitter.Corr3",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "gvar.BufferDict",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "gvar.sqrt",
"line_number": 421,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 421,
"usage_type": "attribute"
},
{
"api_name": "gvar.log",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "gvar.sqrt",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "gvar.log",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "gvar.sqrt",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "gvar.log",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "gvar.log",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "gvar.gvar",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 515,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 520,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 524,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 534,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "gvar.dump",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "gvar.dump",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 582,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 583,
"usage_type": "call"
},
{
"api_name": "gvar.dump",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "gvar.dump",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 591,
"usage_type": "attribute"
},
{
"api_name": "gvar.load",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "gvar.dump",
"line_number": 608,
"usage_type": "call"
},
{
"api_name": "gvar.dump",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "gvar.BufferDict",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 631,
"usage_type": "attribute"
},
{
"api_name": "gvar.gdump",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 633,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 637,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "corrfitter.CorrFitter",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 651,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 651,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "corrfitter.CorrFitter",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 693,
"usage_type": "attribute"
},
{
"api_name": "corrfitter.CorrFitter",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 729,
"usage_type": "attribute"
},
{
"api_name": "corrfitter.CorrFitter",
"line_number": 765,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 767,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 767,
"usage_type": "attribute"
},
{
"api_name": "gvar.BufferDict",
"line_number": 778,
"usage_type": "call"
},
{
"api_name": "gvar.BufferDict",
"line_number": 781,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 792,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 796,
"usage_type": "call"
},
{
"api_name": "lsqfit.wavg",
"line_number": 798,
"usage_type": "call"
},
{
"api_name": "gvar.BufferDict",
"line_number": 821,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 833,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 833,
"usage_type": "attribute"
},
{
"api_name": "gvar.gdump",
"line_number": 834,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 835,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 835,
"usage_type": "attribute"
},
{
"api_name": "numpy.shape",
"line_number": 842,
"usage_type": "call"
}
] |
30097122943
|
""" Comments scraper class"""
import json
import logging
import random
from time import sleep
import numpy as np
import pandas as pd
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from scripts.argparser import ArgParser
from scripts.auxiliary.scraper_aux import check_or_create_folders, save
from scripts.chrome_driver import ChromeDriver
logging.basicConfig(
format="%(asctime)s | %(levelname)s: %(message)s", level=logging.CRITICAL
)
class CommentsScraper:
def __init__(self):
pass
@staticmethod
def iterate(args, ttw, driver):
# ~ 24 comments loaded each iteration
check = True
MAX_ITER = int(args.max_iterations)
n = 0
while check and n <= MAX_ITER:
sleep(int(random.choice(ttw)))
try:
load_more_comments_button = WebDriverWait(driver, 5).until(
EC.visibility_of_element_located(
(By.CSS_SELECTOR, "[aria-label='Load more comments']")
)
)
load_more_comments_button.click()
            except Exception:
check = False
if n == MAX_ITER:
logging.warning(
"Reached the max iterations number before exhausting the post comments. \
                    You may consider raising the max iterations number"
)
else:
logging.info("Exhausted all the post comments")
n = n + 1
@classmethod
def setup(self):
argparser = ArgParser()
chrome_driver = ChromeDriver()
proxy = chrome_driver.set_up_proxy()
args = argparser.likes_scraper_read_input()
driver = chrome_driver.set_up_driver(proxy=proxy)
driver = chrome_driver.make_IG_access_w_creds(
driver=driver, ig_usr=args.username, ig_pass=args.password
)
return driver, proxy, args
@classmethod
    def scrape(self, driver, proxy, args, save_raw_data: bool = True, **kwargs):
if "target_post" in kwargs:
proxy.new_har(
kwargs.get("target_post"),
options={"captureHeaders": True, "captureContent": True},
)
driver.get(kwargs.get("target_post"))
target = kwargs.get("target_post")
else:
proxy.new_har(
args.target_post,
options={"captureHeaders": True, "captureContent": True},
)
driver.get(args.target_post)
target = args.target_post
# Random time intervals to sleep between load more comment button pushes
ttw = []
for i in range(0, 20):
ttw.append(np.round(random.uniform(4, 8), 2))
# ~ 24 comments loaded each iteration
CommentsScraper.iterate(args=args, ttw=ttw, driver=driver)
R = json.loads(json.dumps(proxy.har, ensure_ascii=False))
if save_raw_data:
save(data=R, target=target, args=args)
return R
@classmethod
def parse_and_save_data(self, raw_data, args, target):
"Parse raw scraped data and write to disk"
RAW = {}
for n, v in enumerate(raw_data["log"]["entries"]):
if v["response"]["content"]["mimeType"] in [
"application/json; charset=utf-8",
"application/json",
]:
try:
RAW[n] = json.loads(v["response"]["content"]["text"])["comments"]
                except Exception:
pass
comments_df = pd.DataFrame.from_dict(RAW[list(RAW.keys())[0]])
for k in list(RAW.keys())[1:]:
comments_df = pd.concat([comments_df, pd.DataFrame.from_dict(RAW[k])])
comments_df = comments_df.reset_index(drop=True)
check_or_create_folders(target=target, args=args)
short_code = target.split("/")[-1]
# TODO FIX get the profile name from somewhere and create the correct folder!
comments_df.to_csv(
f"{args.output_folder}/{short_code}_comments_clean.csv", mode="w+"
)
logging.info("Data correctly saved/overwrote.")
        # print(f'Data saved in: {f"{args.output_folder}_{short_code}_comments_clean.csv"}')
return comments_df
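if __name__ == "__main__":
    # Minimal end-to-end sketch (assumes the CLI arguments defined in scripts.argparser
    # supply username, password, target_post and output_folder):
    driver, proxy, args = CommentsScraper.setup()
    raw = CommentsScraper.scrape(driver=driver, proxy=proxy, args=args, save_raw_data=True)
    CommentsScraper.parse_and_save_data(raw_data=raw, args=args, target=args.target_post)
    driver.quit()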
|
ScrPzz/InstagramScraper
|
src/comments_scraper.py
|
comments_scraper.py
|
py
| 4,554 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.CRITICAL",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "logging.warning",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "scripts.argparser.ArgParser",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "scripts.chrome_driver.ChromeDriver",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "scripts.auxiliary.scraper_aux.save",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "scripts.auxiliary.scraper_aux.check_or_create_folders",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 131,
"usage_type": "call"
}
] |
6605463296
|
from itertools import combinations
from collections import Counter
def solution(orders, course):
answer = []
for c in course:
temp = []
for order in orders:
combi = combinations(sorted(order), c)
temp += combi
counter = Counter(temp)
if len(counter) != 0 and max(counter.values()) != 1:
for cnt in counter:
if counter[cnt] == max(counter.values()):
answer.append("".join(cnt))
return sorted(answer)
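if __name__ == "__main__":
    # Sample run using the well-known example input for this problem (values illustrative):
    print(solution(["ABCFG", "AC", "CDE", "ACDE", "BCFG", "ACDEH"], [2, 3, 4]))
    # -> ['AC', 'ACDE', 'BCFG', 'CDE']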
|
JeongGod/Algo-study
|
3people/6week/p72411.py
|
p72411.py
|
py
| 457 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "itertools.combinations",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
}
] |
33353045212
|
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.db.models.signals import pre_delete
from django.dispatch import receiver
class CommonInfo(models.Model):
    # start time  auto_now_add=True,
startday = models.DateField(verbose_name="下单时间", null=True)
    # end time
endday = models.DateField(verbose_name="交货时间", null=True)
    # remarks
remark = models.TextField(default="", verbose_name="备注")
status_choice = (
('pending', "未开始"),
('process', "进行中"),
('finish', "已完成")
)
status = models.CharField(
max_length=10,
choices=status_choice,
default='pending',
verbose_name="订单状态",
)
class Meta:
abstract = True
class Productplan(CommonInfo):
    # order number
orderid = models.CharField(max_length=10, verbose_name="订单号")
    # order category
category_choice = (
('std', "标准"),
('unstd', "非标")
)
category = models.CharField(
max_length=5,
choices=category_choice,
default='std',
verbose_name="订单类型",
)
    # model number
productid = models.CharField(max_length=20, verbose_name="产品型号")
    # serial number
serial = models.CharField(max_length=20, verbose_name="序列号")
    # customer
customer = models.CharField(max_length=20, verbose_name="用户")
    # configuration sheet
conffile = models.FileField(upload_to='service_files/%Y%m%d/', blank=True)
    # update time
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.customer + "_SN" + self.serial
class Meta:
ordering = ('-updated',)
verbose_name_plural = "订单管理"
@receiver(pre_delete, sender=Productplan)
def delete(sender, instance, **kwargs):
instance.conffile.delete(False)
class ProcessElPrepare(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='proel',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "电路板准备"
class ProcessMePrepare(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prome',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "机械件准备"
class ProcessScPrepare(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prosc',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "干涉仪准备"
class ProcessAssemble(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='proas',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "装配中"
class ProcessTesting(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prots',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "测试中"
def __str__(self):
return self.orderid.customer
# add software status
class ProcessSoftware(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prosw',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "软件"
def __str__(self):
return self.orderid.customer
# add payment status
class ProcessPayment(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='propm',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "付款"
def __str__(self):
return self.orderid.customer
# add delivery status
class ProcessDeliver(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prodi',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "发货"
def __str__(self):
return self.orderid.customer
# add invoicing status
class ProcessBilling(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='probi',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "开票"
def __str__(self):
return self.orderid.customer
# add final-payment status
class ProcessDueing(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='produe',
primary_key=True
)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "尾款"
def __str__(self):
return self.orderid.customer
class ProductHistory(models.Model):
    # operator
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='user'
)
    # order
orderid = models.ForeignKey(
Productplan,
verbose_name="订单号",
on_delete=models.CASCADE,
related_name='product_history',
primary_key=False
)
    # operation item
proitem = models.CharField(max_length=10, verbose_name="操作项目")
    # new content
newcontent = models.CharField(max_length=10, verbose_name="新内容")
    # operation time
operateday = models.DateTimeField(verbose_name="操作时间", null=True)
    # update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "订单历史"
def __str__(self):
return self.orderid.customer
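# Illustrative usage sketch (not part of the original app): create an order and attach
# one of the one-to-one process stages via the related_name defined above.
def example_create_order():
    order = Productplan.objects.create(
        orderid="A001", productid="XL-80", serial="SN001", customer="ACME"
    )
    ProcessTesting.objects.create(orderid=order, status="process")
    return order.prots.status  # -> "process"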
|
willmaker2022/drfvueblog
|
productplan/models.py
|
models.py
|
py
| 6,808 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.db.models.FileField",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.dispatch.receiver",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.pre_delete",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "django.db.models.OneToOneField",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 247,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 269,
"usage_type": "name"
}
] |
43573832015
|
import argparse
from experiment.monitor import monitor
from apps.qe import qe
if __name__ == '__main__':
parser = argparse.ArgumentParser()
monitor.setup_run_args(parser)
qe.setup_run_args(parser)
args, extra_args = parser.parse_known_args()
app_conf = qe.QuantumEspressoAppConf(args.node_count, args.benchmark_name)
monitor.launch(app_conf=app_conf, args=args,
experiment_cli_args=extra_args)
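# Example invocation (illustrative; the exact flag names come from the setup_run_args
# helpers above, so treat these as assumptions):
#   python run_monitor_qe.py --node-count 2 --benchmark-name <name> <extra monitor args>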
|
geopm/geopm
|
integration/experiment/monitor/run_monitor_qe.py
|
run_monitor_qe.py
|
py
| 440 |
python
|
en
|
code
| 79 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "experiment.monitor.monitor.setup_run_args",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "experiment.monitor.monitor",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "apps.qe.qe.setup_run_args",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apps.qe.qe",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "apps.qe.qe.QuantumEspressoAppConf",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "apps.qe.qe",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "experiment.monitor.monitor.launch",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "experiment.monitor.monitor",
"line_number": 13,
"usage_type": "name"
}
] |
38036093112
|
import logging
from collections import defaultdict
from typing import Dict, Optional, Tuple, Union
import numpy as np
from matplotlib import rcParams
from matplotlib.axes import SubplotBase
from matplotlib.axis import Axis
from matplotlib.colors import LogNorm
from matplotlib.ticker import AutoMinorLocator, MaxNLocator
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
)
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.plotter import BSPlotter
from sumo.plotting import pretty_plot
from sumo.symmetry import Kpath, PymatgenKpath
from amset.constants import defaults, hbar
from amset.interpolation.bandstructure import Interpolator
from amset.interpolation.periodic import PeriodicLinearInterpolator
from amset.log import initialize_amset_logger
from amset.plot import BaseMeshPlotter, amset_base_style, styled_plot
__author__ = "Alex Ganose"
__maintainer__ = "Alex Ganose"
__email__ = "[email protected]"
logger = logging.getLogger(__name__)
class LineshapePlotter(BaseMeshPlotter):
def __init__(
self,
data,
interpolation_factor=5,
print_log=defaults["print_log"],
symprec=defaults["symprec"],
):
super().__init__(data)
self.interpolation_factor = interpolation_factor
if print_log:
initialize_amset_logger(filename="lineshape.log")
self.symprec = symprec
def _get_interpolater(self, n_idx, t_idx, mode="linear"):
props = defaultdict(dict)
for spin in self.spins:
# calculate total rate
spin_rates = np.sum(self.scattering_rates[spin][:, n_idx, t_idx], axis=0)
# easier to interpolate the log
log_rates = np.log10(spin_rates)
            # clamp rates that are close to numerical noise
log_rates[log_rates > 18] = 15
log_rates[np.isnan(log_rates)] = 15
# map to full k-point mesh
props[spin]["rates"] = log_rates
if mode == "linear":
return _LinearBandStructureInterpolator(
self.kpoints,
self.ir_to_full_kpoint_mapping,
self.energies,
self.structure,
self.efermi,
props,
)
elif mode == "fourier":
bs = BandStructure(
self.ir_kpoints,
self.energies,
self.structure.lattice,
self.efermi,
structure=self.structure,
)
return Interpolator(
bs,
self.num_electrons,
interpolation_factor=self.interpolation_factor,
soc=self.soc,
other_properties=props,
)
raise ValueError("Unknown interpolation mode; should be 'linear' or 'fourier'.")
@styled_plot(amset_base_style)
def get_plot(
self,
n_idx,
t_idx,
zero_to_efermi=True,
estep=0.01,
line_density=100,
height=3.2,
width=3.2,
emin=None,
emax=None,
amin=5e-5,
amax=1e-1,
ylabel="Energy (eV)",
plt=None,
aspect=None,
kpath=None,
cmap="viridis",
colorbar=True,
style=None,
no_base_style=False,
fonts=None,
):
interpolater = self._get_interpolater(n_idx, t_idx)
bs, prop = interpolater.get_line_mode_band_structure(
line_density=line_density,
return_other_properties=True,
kpath=kpath,
symprec=self.symprec,
)
bs, rates = force_branches(bs, {s: p["rates"] for s, p in prop.items()})
fd_emin, fd_emax = self.fd_cutoffs
if not emin:
emin = fd_emin
if zero_to_efermi:
emin -= bs.efermi
if not emax:
emax = fd_emax
if zero_to_efermi:
emax -= bs.efermi
logger.info("Plotting band structure")
if isinstance(plt, (Axis, SubplotBase)):
ax = plt
else:
plt = pretty_plot(width=width, height=height, plt=plt)
ax = plt.gca()
if zero_to_efermi:
bs.bands = {s: b - bs.efermi for s, b in bs.bands.items()}
bs.efermi = 0
bs_plotter = BSPlotter(bs)
plot_data = bs_plotter.bs_plot_data(zero_to_efermi=zero_to_efermi)
energies = np.linspace(emin, emax, int((emax - emin) / estep))
distances = np.array([d for x in plot_data["distances"] for d in x])
# rates are currently log(rate)
mesh_data = np.full((len(distances), len(energies)), 0.0)
for spin in self.spins:
for spin_energies, spin_rates in zip(bs.bands[spin], rates[spin]):
for d_idx in range(len(distances)):
energy = spin_energies[d_idx]
linewidth = 10 ** spin_rates[d_idx] * hbar / 2
broadening = lorentzian(energies, energy, linewidth)
broadening /= 1000 # convert 1/eV to 1/meV
mesh_data[d_idx] += broadening
im = ax.pcolormesh(
distances,
energies,
mesh_data.T,
rasterized=True,
cmap=cmap,
norm=LogNorm(vmin=amin, vmax=amax),
shading="auto",
)
if colorbar:
pos = ax.get_position()
cax = plt.gcf().add_axes([pos.x1 + 0.035, pos.y0, 0.035, pos.height])
cbar = plt.colorbar(im, cax=cax)
cbar.ax.tick_params(axis="y", length=rcParams["ytick.major.size"] * 0.5)
cbar.ax.set_ylabel(
r"$A_\mathbf{k}$ (meV$^{-1}$)", rotation=270, va="bottom"
)
_maketicks(ax, bs_plotter, ylabel=ylabel)
_makeplot(
ax,
plot_data,
bs,
zero_to_efermi=zero_to_efermi,
width=width,
height=height,
ymin=emin,
ymax=emax,
aspect=aspect,
)
return plt
def _makeplot(
ax,
data,
bs,
zero_to_efermi=True,
ymin=-3.0,
ymax=3.0,
height=None,
width=None,
aspect=None,
):
"""Tidy the band structure & add the density of states if required."""
# draw line at Fermi level if not zeroing to e-Fermi
if not zero_to_efermi:
ytick_color = rcParams["ytick.color"]
ef = bs.efermi
ax.axhline(ef, color=ytick_color)
# set x and y limits
ax.set_xlim(0, data["distances"][-1][-1])
if bs.is_metal() and not zero_to_efermi:
ax.set_ylim(bs.efermi + ymin, bs.efermi + ymax)
else:
ax.set_ylim(ymin, ymax)
# keep correct aspect ratio for axes based on canvas size
if aspect is not False:
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
if width is None:
width = rcParams["figure.figsize"][0]
if height is None:
height = rcParams["figure.figsize"][1]
if not aspect:
aspect = height / width
ax.set_aspect(aspect * ((x1 - x0) / (y1 - y0)))
def _maketicks(ax, bs_plotter, ylabel="Energy (eV)"):
"""Utility method to add tick marks to a band structure."""
# set y-ticks
ax.yaxis.set_major_locator(MaxNLocator(6))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
# set x-ticks; only plot the unique tick labels
ticks = bs_plotter.get_ticks()
unique_d = []
unique_l = []
if ticks["distance"]:
temp_ticks = list(zip(ticks["distance"], ticks["label"]))
unique_d.append(temp_ticks[0][0])
unique_l.append(temp_ticks[0][1])
for i in range(1, len(temp_ticks)):
# Append label to sequence if it is not same as predecessor
if unique_l[-1] != temp_ticks[i][1]:
unique_d.append(temp_ticks[i][0])
unique_l.append(temp_ticks[i][1])
logging.info("Label positions:")
for dist, label in list(zip(unique_d, unique_l)):
logging.info(f"\t{dist:.4f}: {label}")
ax.set_xticks(unique_d)
ax.set_xticklabels(unique_l)
ax.xaxis.grid(True)
ax.set_ylabel(ylabel)
def lorentzian(x, x0, gamma):
return 1 / np.pi * gamma / ((x - x0) ** 2 + gamma**2)
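# Sanity check (illustrative, numpy only): the Lorentzian above is unit-normalised,
# so its numerical integral approaches 1 for any positive gamma, e.g.
#
#   x = np.linspace(-100, 100, 200001)
#   np.trapz(lorentzian(x, 0.0, 0.5), x)  # ~0.997 (the missing 0.003 sits in the tails)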
class _LinearBandStructureInterpolator:
def __init__(
self,
full_kpoints,
ir_to_full_idx,
energies,
structure,
efermi,
other_properties,
):
self.structure = structure
self.efermi = efermi
self.spins = list(energies.keys())
self.nbands = {s: len(e) for s, e in energies.items()}
full_energies = {s: e[:, ir_to_full_idx] for s, e in energies.items()}
self.bs_interpolator = PeriodicLinearInterpolator.from_data(
full_kpoints, full_energies
)
self.property_interpolators = {}
other_properties = _transpose_dict(other_properties)
for prop, prop_data in other_properties.items():
full_prop_data = {s: p[:, ir_to_full_idx] for s, p in prop_data.items()}
self.property_interpolators[prop] = PeriodicLinearInterpolator.from_data(
full_kpoints, full_prop_data, gaussian=0.75
)
def get_line_mode_band_structure(
self,
line_density: int = 50,
kpath: Optional[Kpath] = None,
symprec: Optional[float] = defaults["symprec"],
return_other_properties: bool = False,
) -> Union[
BandStructureSymmLine,
Tuple[BandStructureSymmLine, Dict[Spin, Dict[str, np.ndarray]]],
]:
"""Gets the interpolated band structure along high symmetry directions.
Args:
line_density: The maximum number of k-points between each two
                consecutive high-symmetry k-points
            kpath: Custom k-point path to use; if not given, one is generated
                automatically from the structure using PymatgenKpath
symprec: The symmetry tolerance used to determine the space group
and high-symmetry path.
return_other_properties: Whether to include the interpolated
other_properties data for each k-point along the band structure path.
Returns:
The line mode band structure.
"""
if not kpath:
kpath = PymatgenKpath(self.structure, symprec=symprec)
kpoints, labels = kpath.get_kpoints(line_density=line_density, cart_coords=True)
labels_dict = {
label: kpoint for kpoint, label in zip(kpoints, labels) if label != ""
}
rlat = self.structure.lattice.reciprocal_lattice
frac_kpoints = rlat.get_fractional_coords(kpoints)
energies = {}
other_properties = defaultdict(dict)
for spin in self.spins:
energies[spin] = self._interpolate_spin(
spin, frac_kpoints, self.bs_interpolator
)
if return_other_properties:
for prop, property_interpolator in self.property_interpolators.items():
other_properties[spin][prop] = self._interpolate_spin(
spin, frac_kpoints, property_interpolator
)
bs = BandStructureSymmLine(
kpoints,
energies,
rlat,
self.efermi,
labels_dict,
coords_are_cartesian=True,
structure=self.structure,
)
if return_other_properties:
return bs, other_properties
else:
return bs
def _interpolate_spin(self, spin, kpoints, interpolator):
nkpoints = len(kpoints)
spin_nbands = self.nbands[spin]
ibands = np.repeat(np.arange(spin_nbands), nkpoints)
all_kpoints = np.tile(kpoints, (spin_nbands, 1))
data = interpolator.interpolate(spin, ibands, all_kpoints)
return data.reshape(spin_nbands, nkpoints)
def _transpose_dict(d):
td = defaultdict(dict)
for k1, v1 in d.items():
for k2, v2 in v1.items():
td[k2][k1] = v2
return td
def force_branches(bandstructure, other_property=None):
"""Force a linemode band structure to contain branches.
Branches give a specific portion of the path from one high-symmetry point
to another. Branches are required for the plotting methods to function correctly.
Unfortunately, due to the pymatgen BandStructure implementation they require
duplicate k-points in the band structure path. To avoid this unnecessary
computational expense, this function can reconstruct branches in band structures
without the duplicate k-points.
Args:
bandstructure: A band structure object.
other_property: Another property with the format {spin: (nbands, nkpts, ...)}
to split into branches.
Returns:
A tuple of the band structure with branches and the other_property split
into branches (an empty dict if other_property is None).
"""
kpoints = np.array([k.frac_coords for k in bandstructure.kpoints])
labels_dict = {k: v.frac_coords for k, v in bandstructure.labels_dict.items()}
# pymatgen band structure objects support branches. These are formed when
# two kpoints with the same label are next to each other. This bit of code
# will ensure that the band structure will contain branches, if it doesn't
# already.
dup_ids = []
high_sym_kpoints = tuple(map(tuple, labels_dict.values()))
for i, k in enumerate(kpoints):
dup_ids.append(i)
if (
tuple(k) in high_sym_kpoints
and i != 0
and i != len(kpoints) - 1
and (
not np.array_equal(kpoints[i + 1], k)
or not np.array_equal(kpoints[i - 1], k)
)
):
dup_ids.append(i)
kpoints = kpoints[dup_ids]
eigenvals = {}
projections = {}
for spin, spin_energies in bandstructure.bands.items():
eigenvals[spin] = spin_energies[:, dup_ids]
if len(bandstructure.projections) != 0:
projections[spin] = bandstructure.projections[spin][:, dup_ids]
new_property = {}
if other_property is not None:
for spin, spin_prop in other_property.items():
new_property[spin] = spin_prop[:, dup_ids]
new_bandstructure = type(bandstructure)(
kpoints,
eigenvals,
bandstructure.lattice_rec,
bandstructure.efermi,
labels_dict,
structure=bandstructure.structure,
projections=projections,
)
return new_bandstructure, new_property
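# Hypothetical usage sketch (not part of amset; "rates" is an assumed property
# name). get_line_mode_band_structure returns a path without duplicated
# high-symmetry k-points, so force_branches restores proper branches, keeping
# any per-k-point arrays aligned, before the result is handed to a plotter.
def _example_branched_bs(interpolator: _LinearBandStructureInterpolator):
    bs, props = interpolator.get_line_mode_band_structure(
        line_density=80, return_other_properties=True
    )
    # props is {spin: {prop_name: (nbands, nkpts) array}}; pull out one property
    rates = {spin: spin_props["rates"] for spin, spin_props in props.items()}
    return force_branches(bs, other_property=rates)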
|
hackingmaterials/amset
|
amset/plot/lineshape.py
|
lineshape.py
|
py
| 14,486 |
python
|
en
|
code
| 110 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "amset.plot.BaseMeshPlotter",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "amset.constants.defaults",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "amset.constants.defaults",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "amset.log.initialize_amset_logger",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pymatgen.electronic_structure.bandstructure.BandStructure",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "amset.interpolation.bandstructure.Interpolator",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.axis.Axis",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "matplotlib.axes.SubplotBase",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "sumo.plotting.pretty_plot",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pymatgen.electronic_structure.plotter.BSPlotter",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "amset.constants.hbar",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.LogNorm",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "amset.plot.styled_plot",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "amset.plot.amset_base_style",
"line_number": 91,
"usage_type": "argument"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "matplotlib.ticker.MaxNLocator",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker.AutoMinorLocator",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "amset.interpolation.periodic.PeriodicLinearInterpolator.from_data",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "amset.interpolation.periodic.PeriodicLinearInterpolator",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "amset.interpolation.periodic.PeriodicLinearInterpolator.from_data",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "amset.interpolation.periodic.PeriodicLinearInterpolator",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "sumo.symmetry.Kpath",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "amset.constants.defaults",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "sumo.symmetry.PymatgenKpath",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "pymatgen.electronic_structure.bandstructure.BandStructureSymmLine",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "pymatgen.electronic_structure.bandstructure.BandStructureSymmLine",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "pymatgen.electronic_structure.bandstructure.BandStructureSymmLine",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "pymatgen.electronic_structure.core.Spin",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "numpy.repeat",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 413,
"usage_type": "call"
}
] |
17532275577
|
import backtrader as bt
import backtrader.analyzers as btanalyzers
import matplotlib
import matplotlib.dates as mdates
from matplotlib import pyplot as plt
from datetime import datetime
import pandas as pd
import datetime as dt
# Create a subclass of Strategy to define the indicators and logic
class SMA_CrossStrategy(bt.Strategy):
def __init__(self):
ma_fast = bt.ind.SMA(period = 9)
ma_slow = bt.ind.SMA(period = 21)
self.crossover = bt.ind.CrossOver(ma_fast, ma_slow)
def next(self):
if not self.position:
if self.crossover > 0:
self.buy()
elif self.crossover < 0:
self.close()
# def next(self):
# if not self.position:
# if self.crossover > 0:
# self.buy()
# elif self.crossover < 0:
# self.sell()
# elif self.crossover < 0:
# self.close()
def main():
cerebro = bt.Cerebro()
tickers_list = ['AAPL']
df_tic = pd.read_hdf("datasets/df_SnP_500_ohlcv.h5", "df", mode = 'r')
df_tic = df_tic[df_tic['tic'].isin(tickers_list)]
df_tic = df_tic.set_index('date')
# df_tic['date'] = pd.to_datetime(df_tic['date'])
print(df_tic.head(5))
data = bt.feeds.PandasData(dataname = df_tic,
# datetime=None,
open =1,
high=2,
low=3,
close=4,
volume=6,
openinterest=-1,
timeframe = bt.TimeFrame.Days,
fromdate=dt.datetime(2023, 1, 1), # Specify the start date
todate=dt.datetime(2023, 8, 24), # Specify the end date
)
# data = bt.feeds.YahooFinanceData(dataname = 'AAPL', fromdate = datetime(2010, 1, 1), todate = datetime(2020, 1, 1))
cerebro.adddata(data)
cerebro.addstrategy(SMA_CrossStrategy)
cerebro.broker.setcash(10000.0)
cerebro.addsizer(bt.sizers.PercentSizer, percents = 100)
cerebro.addanalyzer(btanalyzers.SharpeRatio, _name = "sharpe")
cerebro.addanalyzer(btanalyzers.Transactions, _name = "trans")
cerebro.addanalyzer(btanalyzers.TradeAnalyzer, _name = "trades")
back = cerebro.run()
print('Final portfolio value:', cerebro.broker.getvalue())
print('Sharpe:', back[0].analyzers.sharpe.get_analysis())
print('Transactions:', back[0].analyzers.trans.get_analysis())
print('Trades:', back[0].analyzers.trades.get_analysis())
cerebro.plot(style='candlestick', barup='green', bardown='red')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gcf().autofmt_xdate() # Rotates the date labels for better visibility
if __name__=="__main__":
main()
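# Variation sketch (assumes the same feed and broker setup as main() above).
# backtrader strategies expose tunable params; this re-expresses the SMA cross
# with configurable periods, so different spans can be tried without editing
# the class, e.g. cerebro.addstrategy(ParamSMACross, fast=5, slow=20).
class ParamSMACross(bt.Strategy):
    params = (('fast', 9), ('slow', 21))

    def __init__(self):
        ma_fast = bt.ind.SMA(period=self.p.fast)
        ma_slow = bt.ind.SMA(period=self.p.slow)
        self.crossover = bt.ind.CrossOver(ma_fast, ma_slow)

    def next(self):
        if not self.position and self.crossover > 0:
            self.buy()
        elif self.position and self.crossover < 0:
            self.close()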
|
erkundanec/Trading_Strategies
|
04_Backtest_Backtrader_SMA_CrossOver.py
|
04_Backtest_Backtrader_SMA_CrossOver.py
|
py
| 3,013 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "backtrader.Strategy",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "backtrader.ind.SMA",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "backtrader.ind",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "backtrader.ind.SMA",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "backtrader.ind",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "backtrader.ind.CrossOver",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "backtrader.ind",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "backtrader.Cerebro",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_hdf",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "backtrader.feeds.PandasData",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "backtrader.feeds",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "backtrader.TimeFrame",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "backtrader.sizers",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "backtrader.analyzers.SharpeRatio",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "backtrader.analyzers",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "backtrader.analyzers.Transactions",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "backtrader.analyzers",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "backtrader.analyzers.TradeAnalyzer",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "backtrader.analyzers",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.dates.DateFormatter",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.dates",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gcf",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
}
] |
27610406237
|
from flask import Flask, request, render_template, redirect, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from surveys import surveys, satisfaction_survey, personality_quiz
app = Flask(__name__)
app.config['SECRET_KEY'] = "secret_code_here"
# app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
# debug = DebugToolbarExtension(app)
@app.route('/')
def choose_the_survey():
""" Start page that allows a user to choose a survey from a survey list """
return render_template('choose_survey.html', surveys=surveys.items())
@app.route('/start-page', methods=['POST'])
def start_survey():
"""render a page for the chosen survey that shows the user the title of the survey, the instructions, and a button to start the survey;
setup session variables """
current_survey=surveys[request.form.get('choice')] #current survey object
session['title'] = current_survey.title
session['num_of_questions'] = len(current_survey.questions)
session['survey'] = request.form['choice'] # key of current survey in surveys list
session['comments'] = []
return render_template('start.html', instructions = current_survey.instructions)
@app.route('/start', methods=['POST'])
def handling_start():
""" Set a current session responses-list to an empty list and redirect to the start of the survey """
session['responses'] = []
return redirect('/questions/0')
@app.route('/questions/<question_number>')
def question_page(question_number):
""" Page shows a form asking the current question, and listing the choices as radio buttons, also comments if are allowed """
answers = session['responses']
if len(answers) == session['num_of_questions']:
return render_template('thanks.html')
if len(answers) == int(question_number):
number = int(question_number)
else:
flash('Please answer this question first; you tried to access an invalid question','error')
number = len(answers)
current_survey = surveys[session['survey']]
current_question = current_survey.questions[number]
return render_template('question.html', number = number, choices = current_question.choices, question = current_question.question, title = current_survey.title, allow_text = current_question.allow_text)
@app.route('/answer', methods=['POST'])
def handling_answer():
""" function appends the answer to responses list, adds comments if necessary and then redirect user to the next question; if no more questions in the survey - render thanks page ."""
comment = request.form.get('comment')
current_answer = request.form.get('choice')
answers = session['responses']
if current_answer:
answers.append(current_answer)
session['responses'] = answers
if comment:
comments = session['comments']
comments.append((len(answers),comment))
session['comments'] = comments
else:
flash('We are still waiting for your response!','error')
if len(answers) < session['num_of_questions']:
next_question = f"/questions/{len(answers)}"
return redirect(next_question)
else:
return render_template('thanks.html')
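# Minimal local-run sketch (assumes this module is the app entry point);
# Flask's built-in development server is enough to click through the survey flow.
if __name__ == '__main__':
    app.run(debug=True)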
|
Tetyana-I/flask-survey
|
app.py
|
app.py
|
py
| 3,204 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "surveys.surveys.items",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "surveys.surveys",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "surveys.surveys",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "surveys.surveys",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 68,
"usage_type": "call"
}
] |
72344006587
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import datasets
import utils
FLAGS = tf.flags.FLAGS
def get_lr(global_step, base_lr, steps_per_epoch, # pylint: disable=missing-docstring
decay_epochs, lr_decay_factor, warmup_epochs):
warmup_lr = 0.0
if warmup_epochs > 0:
warmup_lr = (tf.cast(global_step, tf.float32) *
(base_lr / (warmup_epochs * steps_per_epoch)))
normal_lr = tf.train.piecewise_constant(
global_step,
[e * steps_per_epoch for e in decay_epochs],
[base_lr * (lr_decay_factor ** i) for i in range(len(decay_epochs) + 1)]
)
lr = tf.cond(tf.less(global_step, warmup_epochs * steps_per_epoch),
lambda: warmup_lr,
lambda: normal_lr)
return lr
# TODO(akolesnikov): add more logging
class Trainer(object):
"""Base trainer class."""
def __init__(self,
update_batchnorm_params=True):
self.update_batchnorm_params = update_batchnorm_params
split = FLAGS.get_flag_value('train_split', 'train')
num_samples = datasets.get_count(split)
steps_per_epoch = num_samples // FLAGS.batch_size
global_step = tf.train.get_or_create_global_step()
self.global_step_inc = tf.assign_add(global_step, 1)
# lr_scale_batch_size defines a canonical batch size that is coupled with
# the initial learning rate. If the actual batch size is not the same as the
# canonical one, the learning rate is linearly scaled. This is very convenient
# as it allows varying the batch size without recomputing the learning rate.
lr_factor = 1.0
if FLAGS.get_flag_value('lr_scale_batch_size', 0):
lr_factor = FLAGS.batch_size / float(FLAGS.lr_scale_batch_size)
deps = FLAGS.get_flag_value('decay_epochs', None)
decay_epochs = utils.str2intlist(deps) if deps else [FLAGS.epochs]
self.lr = get_lr(
global_step,
base_lr=FLAGS.lr * lr_factor,
steps_per_epoch=steps_per_epoch,
decay_epochs=decay_epochs,
lr_decay_factor=FLAGS.get_flag_value('lr_decay_factor', 0.1),
warmup_epochs=FLAGS.get_flag_value('warmup_epochs', 0))
# TODO(marvinritter): Re-enable summaries with support for TPU training.
# tf.summary.scalar('learning_rate', self.lr)
def get_train_op(self, loss, # pylint: disable=missing-docstring
var_list=None,
add_reg_loss=True,
use_tpu=False):
if add_reg_loss:
l2_loss = tf.reduce_sum(tf.losses.get_regularization_losses())
loss += l2_loss
optimizer = FLAGS.get_flag_value('optimizer', 'sgd')
if optimizer == 'sgd':
optimizer = tf.train.MomentumOptimizer(learning_rate=self.lr,
momentum=0.9)
elif optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
else:
raise ValueError('Unknown optimizer: %s' % optimizer)
if use_tpu:
# Wrap optimizer in CrossShardOptimizer which takes care of
# synchronizing the weight updates between TPU cores.
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
opt_step = optimizer.minimize(loss, var_list=var_list,
colocate_gradients_with_ops=True)
if self.update_batchnorm_params:
opt_step = tf.group([opt_step] +
tf.get_collection(tf.GraphKeys.UPDATE_OPS))
opt_step = tf.group([opt_step, self.global_step_inc])
return opt_step
def make_estimator(mode, loss=None, eval_metrics=None, predictions=None):
"""Returns an EstimatorSpec (maybe TPU) for all modes."""
# Always use TPUEstimator, even when not using TPU; then it's (almost) a no-op.
spec_type = tf.contrib.tpu.TPUEstimatorSpec
if mode == tf.estimator.ModeKeys.PREDICT:
assert predictions is not None, 'Need to pass `predict` arg.'
return spec_type(mode=mode, predictions=predictions)
if mode == tf.estimator.ModeKeys.EVAL:
return spec_type(mode=mode, loss=loss, eval_metrics=eval_metrics)
if mode == tf.estimator.ModeKeys.TRAIN:
assert loss is not None, 'Need to pass `loss` arg.'
trainer = Trainer(update_batchnorm_params=True)
train_op = trainer.get_train_op(loss, use_tpu=FLAGS.use_tpu)
return spec_type(mode=mode, loss=loss, train_op=train_op)
raise ValueError('Unsupported mode %s' % mode)
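# Sanity sketch (assumes TF1.x graph mode, matching the tf.train/tf.contrib
# calls above): evaluate the warmup + piecewise-decay schedule at a few steps.
def _print_lr_schedule():
    gs = tf.placeholder(tf.int32, [])
    lr = get_lr(gs, base_lr=0.1, steps_per_epoch=100,
                decay_epochs=[30, 60], lr_decay_factor=0.1, warmup_epochs=5)
    with tf.Session() as sess:
        # warmup ramps linearly over the first 500 steps; afterwards the
        # piecewise schedule cuts the rate by 10x past steps 3000 and 6000
        for step in (0, 250, 499, 500, 3000, 6000):
            print(step, sess.run(lr, {gs: step}))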
|
google/revisiting-self-supervised
|
trainer.py
|
trainer.py
|
py
| 4,425 |
python
|
en
|
code
| 349 |
github-code
|
6
|
[
{
"api_name": "tensorflow.flags",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cast",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.piecewise_constant",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cond",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.less",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datasets.get_count",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.get_or_create_global_step",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.assign_add",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "utils.str2intlist",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tensorflow.losses.get_regularization_losses",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tensorflow.losses",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.MomentumOptimizer",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.contrib.tpu.CrossShardOptimizer",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.group",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_collection",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphKeys",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.group",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.estimator",
"line_number": 119,
"usage_type": "attribute"
}
] |
26246786176
|
import argparse
import logging
from typing import List
import torch
import torch.nn as nn
from .probe_base import ProbeBase
logger = logging.getLogger(__name__)
class OneWordNNProbe(ProbeBase):
"""
Computes the squared L2 norms of n word representations as depths after an MLP projection.
Can be used for probing the depth of words in a tree.
"""
def __init__(
self, model_hidden_dim: int, probe_hidden_layers: int, intermediate_dim: int
):
logging.info("Constructing OneWordNNDepthProbe")
super(OneWordNNProbe, self).__init__()
initial_linear = nn.Linear(model_hidden_dim, intermediate_dim)
intermediate_layers: List[nn.Module] = []
for i in range(probe_hidden_layers):
intermediate_layers.append(nn.Linear(intermediate_dim, intermediate_dim))
if i != probe_hidden_layers - 1:
intermediate_layers.append(nn.ReLU())
self.nn_probe = nn.Sequential(initial_linear, nn.ReLU(), *intermediate_layers)
def forward(self, batch):
"""
Computes the squared L2 norm of each of the n words as its depth after an
MLP projection, for each sentence in a batch.
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns: A tensor of depths of shape (batch_size, max_seq_len)
"""
batch = self.nn_probe(batch)
batchlen, seqlen, rank = batch.size()
norms = torch.bmm(
batch.view(batchlen * seqlen, 1, rank),
batch.view(batchlen * seqlen, rank, 1),
)
norms = norms.view(batchlen, seqlen)
return norms
@staticmethod
def add_probe_specific_args(
parent_parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
parser = parent_parser.add_argument_group("ProbeArgs")
parser.add_argument(
"--probe_hidden_layers",
type=int,
default=2,
help="Number of laers in probe.",
)
parser.add_argument(
"--intermediate_dim",
type=int,
default=300,
help="Dimension the probe maps embeddings to.",
)
return parent_parser
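# Smoke-test sketch (assumption: ProbeBase ultimately subclasses
# torch.nn.Module, as the Sequential/forward usage above implies):
def _smoke_test():
    probe = OneWordNNProbe(model_hidden_dim=768, probe_hidden_layers=2,
                           intermediate_dim=300)
    batch = torch.randn(4, 12, 768)  # (batch_size, max_seq_len, hidden_dim)
    depths = probe(batch)            # squared L2 norm per word
    assert depths.shape == (4, 12)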
|
VSJMilewski/multimodal-probes
|
probing_project/probes/one_word_nn_probe.py
|
one_word_nn_probe.py
|
py
| 2,291 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "probe_base.ProbeBase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 53,
"usage_type": "attribute"
}
] |
20519700810
|
"""!
@brief Cluster analysis algorithm: X-Means
@details Implementation based on papers @cite article::xmeans::1, @cite article::xmeans::mndl
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import copy
import numpy
from enum import IntEnum
from math import log
from pyclustering.cluster.encoder import type_encoding
from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.xmeans_wrapper as wrapper
from pyclustering.utils import distance_metric, type_metric
class splitting_type(IntEnum):
"""!
@brief Enumeration of splitting types that can be used as splitting creation of cluster in X-Means algorithm.
"""
## Bayesian information criterion (BIC) to approximate the correct number of clusters.
## Kass's formula is used to calculate BIC:
## \f[BIC(\theta) = L(D) - \frac{1}{2}pln(N)\f]
##
## The number of free parameters \f$p\f$ is simply the sum of \f$K - 1\f$ class probabilities, \f$MK\f$ centroid coordinates, and one variance estimate:
## \f[p = (K - 1) + MK + 1\f]
##
## The log-likelihood of the data:
## \f[L(D) = n_jln(n_j) - n_jln(N) - \frac{n_j}{2}ln(2\pi) - \frac{n_jd}{2}ln(\hat{\sigma}^2) - \frac{n_j - K}{2}\f]
##
## The maximum likelihood estimate (MLE) for the variance:
## \f[\hat{\sigma}^2 = \frac{1}{N - K}\sum\limits_{j}\sum\limits_{i}||x_{ij} - \hat{C}_j||^2\f]
BAYESIAN_INFORMATION_CRITERION = 0
## Minimum noiseless description length (MNDL) to approximate the correct number of clusters @cite article::xmeans::mndl.
## Beheshti's formula is used to calculate upper bound:
## \f[Z = \frac{\sigma^2 \sqrt{2K} }{N}(\sqrt{2K} + \beta) + W - \sigma^2 + \frac{2\alpha\sigma}{\sqrt{N}}\sqrt{\frac{\alpha^2\sigma^2}{N} + W - \left(1 - \frac{K}{N}\right)\frac{\sigma^2}{2}} + \frac{2\alpha^2\sigma^2}{N}\f]
##
## where \f$\alpha\f$ and \f$\beta\f$ represent the parameters for validation probability and confidence probability.
##
## To improve clustering results some contradiction is introduced:
## \f[W = \frac{1}{n_j}\sum\limits_{i}||x_{ij} - \hat{C}_j||\f]
## \f[\hat{\sigma}^2 = \frac{1}{N - K}\sum\limits_{j}\sum\limits_{i}||x_{ij} - \hat{C}_j||\f]
MINIMUM_NOISELESS_DESCRIPTION_LENGTH = 1
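# Illustrative sketch (not part of pyclustering): Kass's BIC for one clustering,
# following the formulas above. `data` is a list of points, `clusters` holds
# index lists into `data`, and `centers` the matching centroids; every name
# here is hypothetical.
def _bic_sketch(data, clusters, centers):
    data = numpy.asarray(data)
    K, M = len(clusters), data.shape[1]
    N = sum(len(c) for c in clusters)
    # MLE of the pooled variance (assumes N > K and non-degenerate data)
    sigma2 = sum(numpy.sum((data[c] - numpy.asarray(centers[i])) ** 2)
                 for i, c in enumerate(clusters)) / (N - K)
    p = (K - 1) + M * K + 1  # number of free parameters
    likelihood = 0.0
    for c in clusters:
        n = len(c)
        likelihood += (n * log(n) - n * log(N) - 0.5 * n * log(2.0 * numpy.pi)
                       - 0.5 * n * M * log(sigma2) - 0.5 * (n - K))
    return likelihood - 0.5 * p * log(N)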
class xmeans:
"""!
@brief Class represents clustering algorithm X-Means.
@details X-means clustering method starts with the assumption of having a minimum number of clusters,
and then dynamically increases them. X-means uses specified splitting criterion to control
the process of splitting clusters. Method K-Means++ can be used for calculation of initial centers.
CCORE implementation of the algorithm uses thread pool to parallelize the clustering process.
Here example how to perform cluster analysis using X-Means algorithm:
@code
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# Read sample 'simple3' from file.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# Prepare initial centers - amount of initial centers defines amount of clusters from which X-Means will
# start analysis.
amount_initial_centers = 2
initial_centers = kmeans_plusplus_initializer(sample, amount_initial_centers).initialize()
# Create instance of X-Means algorithm. The algorithm will start analysis from 2 clusters, the maximum
# number of clusters that can be allocated is 20.
xmeans_instance = xmeans(sample, initial_centers, 20)
xmeans_instance.process()
# Extract clustering results: clusters and their centers
clusters = xmeans_instance.get_clusters()
centers = xmeans_instance.get_centers()
# Print total sum of metric errors
print("Total WCE:", xmeans_instance.get_total_wce())
# Visualize clustering results
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.append_cluster(centers, None, marker='*', markersize=10)
visualizer.show()
@endcode
Visualization of clustering results that were obtained using code above and where X-Means algorithm allocates four clusters.
@image html xmeans_clustering_simple3.png "Fig. 1. X-Means clustering results (data 'Simple3')."
By default X-Means clustering algorithm uses Bayesian Information Criterion (BIC) to approximate the correct number
of clusters. There is an example where another criterion Minimum Noiseless Description Length (MNDL) is used in order
to find optimal amount of clusters:
@code
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans, splitting_type
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
# Read sample 'Target'.
sample = read_sample(FCPS_SAMPLES.SAMPLE_TARGET)
# Prepare initial centers - amount of initial centers defines amount of clusters from which X-Means will start analysis.
random_seed = 1000
amount_initial_centers = 3
initial_centers = kmeans_plusplus_initializer(sample, amount_initial_centers, random_state=random_seed).initialize()
# Create instance of X-Means algorithm with MNDL splitting criterion.
xmeans_mndl = xmeans(sample, initial_centers, 20, splitting_type=splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, random_state=random_seed)
xmeans_mndl.process()
# Extract X-Means MNDL clustering results.
mndl_clusters = xmeans_mndl.get_clusters()
# Visualize clustering results.
visualizer = cluster_visualizer(titles=['X-Means with MNDL criterion'])
visualizer.append_clusters(mndl_clusters, sample)
visualizer.show()
@endcode
@image html xmeans_clustering_mndl_target.png "Fig. 2. X-Means MNDL clustering results (data 'Target')."
As in many others algorithms, it is possible to specify metric that should be used for cluster analysis, for
example, Chebyshev distance metric:
@code
# Create instance of X-Means algorithm with Chebyshev distance metric.
chebyshev_metric = distance_metric(type_metric.CHEBYSHEV)
xmeans_instance = xmeans(sample, initial_centers, max_clusters_amount, metric=chebyshev_metric).process()
@endcode
@see center_initializer
"""
def __init__(self, data, initial_centers=None, kmax=20, tolerance=0.001, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=True, **kwargs):
"""!
@brief Constructor of clustering algorithm X-Means.
@param[in] data (array_like): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] initial_centers (list): Initial coordinates of centers of clusters that are represented by list: `[center1, center2, ...]`,
if it is not specified then X-Means starts from two centers generated by the K-Means++ initializer.
@param[in] kmax (uint): Maximum number of clusters that can be allocated.
@param[in] tolerance (double): Stop condition for each iteration: if the maximum change of the cluster centers is less than the tolerance, then the algorithm stops processing.
@param[in] criterion (splitting_type): Type of splitting creation (by default `splitting_type.BAYESIAN_INFORMATION_CRITERION`).
@param[in] ccore (bool): Defines if C++ pyclustering library should be used instead of Python implementation.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: `repeat`, `random_state`, `metric`, `alpha`, `beta`).
<b>Keyword Args:</b><br>
- repeat (uint): How many times K-Means should be run to improve parameters (by default `1`).
Larger `repeat` values increase the probability of finding the global optimum.
- random_state (int): Seed for random state (by default is `None`, current system time is used).
- metric (distance_metric): Metric that is used for distance calculation between two points (by default
euclidean square distance).
- alpha (double): Parameter in the range [0.0, 1.0] for the alpha probabilistic bound \f$Q\left(\alpha\right)\f$.
The parameter is used only in case of the MNDL splitting criterion; in all other cases this value is ignored.
- beta (double): Parameter in the range [0.0, 1.0] for the beta probabilistic bound \f$Q\left(\beta\right)\f$.
The parameter is used only in case of the MNDL splitting criterion; in all other cases this value is ignored.
"""
self.__pointer_data = numpy.array(data)
self.__clusters = []
self.__random_state = kwargs.get('random_state', None)
self.__metric = copy.copy(kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE)))
if initial_centers is not None:
self.__centers = numpy.array(initial_centers)
else:
self.__centers = kmeans_plusplus_initializer(data, 2, random_state=self.__random_state).initialize()
self.__kmax = kmax
self.__tolerance = tolerance
self.__criterion = criterion
self.__total_wce = 0.0
self.__repeat = kwargs.get('repeat', 1)
self.__alpha = kwargs.get('alpha', 0.9)
self.__beta = kwargs.get('beta', 0.9)
self.__ccore = ccore and self.__metric.get_type() != type_metric.USER_DEFINED
if self.__ccore is True:
self.__ccore = ccore_library.workable()
self.__verify_arguments()
def process(self):
"""!
@brief Performs cluster analysis in line with rules of X-Means algorithm.
@return (xmeans) Returns itself (X-Means instance).
@see get_clusters()
@see get_centers()
"""
if self.__ccore is True:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def __process_by_ccore(self):
"""!
@brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).
"""
ccore_metric = metric_wrapper.create_instance(self.__metric)
result = wrapper.xmeans(self.__pointer_data, self.__centers, self.__kmax, self.__tolerance, self.__criterion,
self.__alpha, self.__beta, self.__repeat, self.__random_state,
ccore_metric.get_pointer())
self.__clusters = result[0]
self.__centers = result[1]
self.__total_wce = result[2][0]
def __process_by_python(self):
"""!
@brief Performs cluster analysis using python code.
"""
self.__clusters = []
while len(self.__centers) <= self.__kmax:
current_cluster_number = len(self.__centers)
self.__clusters, self.__centers, _ = self.__improve_parameters(self.__centers)
allocated_centers = self.__improve_structure(self.__clusters, self.__centers)
if current_cluster_number == len(allocated_centers):
break
else:
self.__centers = allocated_centers
self.__clusters, self.__centers, self.__total_wce = self.__improve_parameters(self.__centers)
def predict(self, points):
"""!
@brief Calculates the closest cluster to each point.
@param[in] points (array_like): Points for which closest clusters are calculated.
@return (list) List of closest clusters for each point. Each cluster is denoted by index. Return empty
collection if 'process()' method was not called.
An example how to calculate (or predict) the closest cluster to specified points.
@code
from pyclustering.cluster.xmeans import xmeans
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
# Load list of points for cluster analysis.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# Initial centers for sample 'Simple3'.
initial_centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
# Create instance of X-Means algorithm with prepared centers.
xmeans_instance = xmeans(sample, initial_centers)
# Run cluster analysis.
xmeans_instance.process()
# Calculate the closest cluster to following two points.
points = [[0.25, 0.2], [2.5, 4.0]]
closest_clusters = xmeans_instance.predict(points)
print(closest_clusters)
@endcode
"""
nppoints = numpy.array(points)
if len(self.__clusters) == 0:
return []
self.__metric.enable_numpy_usage()
npcenters = numpy.array(self.__centers)
differences = numpy.zeros((len(nppoints), len(npcenters)))
for index_point in range(len(nppoints)):
differences[index_point] = self.__metric(nppoints[index_point], npcenters)
self.__metric.disable_numpy_usage()
return numpy.argmin(differences, axis=1)
def get_clusters(self):
"""!
@brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.
@return (list) List of allocated clusters.
@see process()
@see get_centers()
@see get_total_wce()
"""
return self.__clusters
def get_centers(self):
"""!
@brief Returns list of centers for allocated clusters.
@return (list) List of centers for allocated clusters.
@see process()
@see get_clusters()
@see get_total_wce()
"""
return self.__centers
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def get_total_wce(self):
"""!
@brief Returns sum of Euclidean Squared metric errors (SSE - Sum of Squared Errors).
@details Sum of metric errors is calculated using distance between point and its center:
\f[error=\sum_{i=0}^{N}euclidean\_square\_distance(x_{i}, center(x_{i}))\f]
@see process()
@see get_clusters()
"""
return self.__total_wce
def __search_optimal_parameters(self, local_data):
"""!
@brief Splits data of the region into two clusters and tries to find the global optimum by running k-means clustering
several times (defined by the 'repeat' argument).
@param[in] local_data (list): Points of a region that should be split into two clusters.
@return (tuple) List of allocated clusters, list of centers and total WCE (clusters, centers, wce).
"""
optimal_wce, optimal_centers, optimal_clusters = float('+inf'), None, None
for _ in range(self.__repeat):
candidates = 5
if len(local_data) < candidates:
candidates = len(local_data)
local_centers = kmeans_plusplus_initializer(local_data, 2, candidates, random_state=self.__random_state).initialize()
kmeans_instance = kmeans(local_data, local_centers, tolerance=self.__tolerance, ccore=False, metric=self.__metric)
kmeans_instance.process()
local_wce = kmeans_instance.get_total_wce()
if local_wce < optimal_wce:
optimal_centers = kmeans_instance.get_centers()
optimal_clusters = kmeans_instance.get_clusters()
optimal_wce = local_wce
return optimal_clusters, optimal_centers, optimal_wce
def __improve_parameters(self, centers, available_indexes=None):
"""!
@brief Performs k-means clustering in the specified region.
@param[in] centers (list): Cluster centers, if None then automatically generated two centers using center initialization method.
@param[in] available_indexes (list): Indexes that defines which points can be used for k-means clustering, if None then all points are used.
@return (tuple) List of allocated clusters, list of centers and total WCE (clusters, centers, wce).
"""
if available_indexes and len(available_indexes) == 1:
index_center = available_indexes[0]
return [available_indexes], self.__pointer_data[index_center], 0.0
local_data = self.__pointer_data
if available_indexes:
local_data = [self.__pointer_data[i] for i in available_indexes]
local_centers = centers
if centers is None:
clusters, local_centers, local_wce = self.__search_optimal_parameters(local_data)
else:
kmeans_instance = kmeans(local_data, local_centers, tolerance=self.__tolerance, ccore=False, metric=self.__metric).process()
local_wce = kmeans_instance.get_total_wce()
local_centers = kmeans_instance.get_centers()
clusters = kmeans_instance.get_clusters()
if available_indexes:
clusters = self.__local_to_global_clusters(clusters, available_indexes)
return clusters, local_centers, local_wce
def __local_to_global_clusters(self, local_clusters, available_indexes):
"""!
@brief Converts clusters in the local region defined by 'available_indexes' to global clusters.
@param[in] local_clusters (list): Local clusters in specific region.
@param[in] available_indexes (list): Map between local and global point's indexes.
@return Global clusters.
"""
clusters = []
for local_cluster in local_clusters:
current_cluster = []
for index_point in local_cluster:
current_cluster.append(available_indexes[index_point])
clusters.append(current_cluster)
return clusters
def __improve_structure(self, clusters, centers):
"""!
@brief Check for best structure: divides each cluster into two and checks for best results using splitting criterion.
@param[in] clusters (list): Clusters that have been allocated (each cluster contains indexes of points from data).
@param[in] centers (list): Centers of clusters.
@return (list) Allocated centers for clustering.
"""
allocated_centers = []
amount_free_centers = self.__kmax - len(centers)
for index_cluster in range(len(clusters)):
# solve k-means problem for children where data of parent are used.
(parent_child_clusters, parent_child_centers, _) = self.__improve_parameters(None, clusters[index_cluster])
# If it's possible to split current data
if len(parent_child_clusters) > 1:
# Calculate splitting criterion
parent_scores = self.__splitting_criterion([clusters[index_cluster]], [centers[index_cluster]])
child_scores = self.__splitting_criterion([parent_child_clusters[0], parent_child_clusters[1]], parent_child_centers)
split_require = False
# Reallocate number of centers (clusters) in line with scores
if self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION:
if parent_scores < child_scores:
split_require = True
elif self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH:
# If its score for the split structure with two children is smaller than that for the parent structure,
# then representing the data samples with two clusters is more accurate in comparison to a single parent cluster.
if parent_scores > child_scores:
split_require = True
if (split_require is True) and (amount_free_centers > 0):
allocated_centers.append(parent_child_centers[0])
allocated_centers.append(parent_child_centers[1])
amount_free_centers -= 1
else:
allocated_centers.append(centers[index_cluster])
else:
allocated_centers.append(centers[index_cluster])
return allocated_centers
def __splitting_criterion(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Returns splitting criterion. High value of splitting criterion means that current structure is
much better.
@see __bayesian_information_criterion(clusters, centers)
@see __minimum_noiseless_description_length(clusters, centers)
"""
if self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION:
return self.__bayesian_information_criterion(clusters, centers)
elif self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH:
return self.__minimum_noiseless_description_length(clusters, centers)
else:
assert 0
def __minimum_noiseless_description_length(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters using minimum noiseless description length criterion.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Returns splitting criterion in line with the minimum noiseless description length criterion.
A low value of the splitting criterion means that the current structure is much better.
@see __bayesian_information_criterion(clusters, centers)
"""
score = float('inf')
W = 0.0
K = len(clusters)
N = 0.0
sigma_square = 0.0
alpha = self.__alpha
alpha_square = alpha * alpha
beta = self.__beta
for index_cluster in range(0, len(clusters), 1):
Ni = len(clusters[index_cluster])
if Ni == 0:
return float('inf')
Wi = 0.0
for index_object in clusters[index_cluster]:
Wi += self.__metric(self.__pointer_data[index_object], centers[index_cluster])
sigma_square += Wi
W += Wi / Ni
N += Ni
if N - K > 0:
sigma_square /= (N - K)
sigma = sigma_square ** 0.5
Kw = (1.0 - K / N) * sigma_square
Ksa = (2.0 * alpha * sigma / (N ** 0.5)) * (alpha_square * sigma_square / N + W - Kw / 2.0) ** 0.5
UQa = W - Kw + 2.0 * alpha_square * sigma_square / N + Ksa
score = sigma_square * K / N + UQa + sigma_square * beta * ((2.0 * K) ** 0.5) / N
return score
def __bayesian_information_criterion(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters using bayesian information criterion.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Splitting criterion in line with bayesian information criterion.
High value of splitting criterion means that current structure is much better.
@see __minimum_noiseless_description_length(clusters, centers)
"""
scores = [float('inf')] * len(clusters) # splitting criterion
dimension = len(self.__pointer_data[0])
# estimation of the noise variance in the data set
sigma_sqrt = 0.0
K = len(clusters)
N = 0.0
for index_cluster in range(0, len(clusters), 1):
for index_object in clusters[index_cluster]:
sigma_sqrt += self.__metric(self.__pointer_data[index_object], centers[index_cluster])
N += len(clusters[index_cluster])
if N - K > 0:
sigma_sqrt /= (N - K)
p = (K - 1) + dimension * K + 1
# in case of the same points, sigma_sqrt can be zero (issue: #407)
sigma_multiplier = 0.0
if sigma_sqrt <= 0.0:
sigma_multiplier = float('-inf')
else:
sigma_multiplier = dimension * 0.5 * log(sigma_sqrt)
# splitting criterion
for index_cluster in range(0, len(clusters), 1):
n = len(clusters[index_cluster])
L = n * log(n) - n * log(N) - n * 0.5 * log(2.0 * numpy.pi) - n * sigma_multiplier - (n - K) * 0.5
# BIC calculation
scores[index_cluster] = L - p * 0.5 * log(N)
return sum(scores)
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if len(self.__centers) == 0:
raise ValueError("Initial centers are empty (size: '%d')." % len(self.__pointer_data))
if self.__tolerance < 0:
raise ValueError("Tolerance (current value: '%d') should be greater or equal to 0." %
self.__tolerance)
if self.__repeat <= 0:
raise ValueError("Repeat (current value: '%d') should be greater than 0." %
self.__repeat)
if self.__alpha < 0.0 or self.__alpha > 1.0:
raise ValueError("Parameter for the probabilistic bound Q(alpha) should in the following range [0, 1] "
"(current value: '%f')." % self.__alpha)
if self.__beta < 0.0 or self.__beta > 1.0:
raise ValueError("Parameter for the probabilistic bound Q(beta) should in the following range [0, 1] "
"(current value: '%f')." % self.__beta)
|
annoviko/pyclustering
|
pyclustering/cluster/xmeans.py
|
xmeans.py
|
py
| 28,247 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
[
{
"api_name": "enum.IntEnum",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.distance_metric",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.type_metric.EUCLIDEAN_SQUARE",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.utils.type_metric",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "pyclustering.cluster.center_initializer.kmeans_plusplus_initializer",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.type_metric.USER_DEFINED",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.utils.type_metric",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "pyclustering.core.wrapper.ccore_library.workable",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pyclustering.core.wrapper.ccore_library",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "pyclustering.core.metric_wrapper.metric_wrapper.create_instance",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pyclustering.core.metric_wrapper.metric_wrapper",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "pyclustering.core.xmeans_wrapper.xmeans",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pyclustering.core.xmeans_wrapper",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "pyclustering.cluster.encoder.type_encoding.CLUSTER_INDEX_LIST_SEPARATION",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.cluster.encoder.type_encoding",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "pyclustering.cluster.center_initializer.kmeans_plusplus_initializer",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "pyclustering.cluster.kmeans.kmeans",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "pyclustering.cluster.kmeans.kmeans",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 630,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 630,
"usage_type": "attribute"
},
{
"api_name": "math.log",
"line_number": 633,
"usage_type": "call"
}
] |
38149030065
|
import pygame
from pygame.surface import *
from pygame.sprite import Sprite
from pygame.sprite import RenderUpdates as SpriteGroup
from pygame.sprite import spritecollide
from pygame.sprite import spritecollideany
from pygame.rect import Rect
from random import *
from config import *
from log import *
screen = None
def createFrames(image):
fr_width = image.get_height()
fr_size = fr_width, fr_width
frames = []
for frame_no in range(0, image.get_width(), fr_width):
frame = pygame.Surface(fr_size)
frame.blit(image, (0,0), ((frame_no,0), fr_size))
frame.set_colorkey(PUCE)
frames.append(frame)
return frames
def initPygame():
pygame.init()
global screen
screen = pygame.display.set_mode(RESOLUTION, FLAGS)
class Widget(Sprite):
"""Use Widget class for better movement tracking
Widget class inherits from Sprite class.
Test cases for class Widget
>>> from widget import *
>>> import pygame
>>> s = pygame.surface.Surface((30,30))
>>> w = Widget(s, (0,0,30,30), (0,0))
>>> w.rect
<rect(0, 0, 30, 30)>
>>> w.update()
>>> w.rect
<rect(0, 0, 30, 30)>
>>> w.getMovement()
[0, 0]
>>> w.setX(1)
>>> w.getX()
1
>>> w.setY(4)
>>> w.getY()
4
>>> w.setMovement((3,5))
>>> w.getMovement()
(3, 5)
>>> w.getName()
(0, 0)
>>> w.setPosition((5,7))
>>> w.getPosition()
(5, 7)
"""
def __init__(self, image, rect, name=''):
"""Instantiate a widget with a given surface,
rectangle, and (x,y) movement pair.
"""
Sprite.__init__(self)
self.movement = [0, 0]
self.rect = Rect(rect)
self.lastRect = self.rect
self.image = image
self.name = name
self.frames = []
self.frameIndex = 0
self.frameRate = 1
self.timeDelay = WIDGETFRAMES
self.lastUpdate = 0
self.world = None
self.undone = False
self.id = self.rect.top + self.rect.left +\
self.rect.width + self.rect.height
def attachToWorld(self, world):
self.world = world
self.id = self.world.curWidgetID
self.world.curWidgetID += 1
def startAnimation(self, frames, startIndex, frameRate):
self.frames = frames
self.frameIndex = startIndex
self.frameRate = frameRate
self.image = self.frames[startIndex]
self.lastUpdate = self.timeDelay
def __str__(self):
return str(self.rect.left) + str(self.rect.top) + str(self.id)
def setMovement(self, vector):
"""Set movement with a pair"""
if(self.movement != [0,0]
and vector == [0,0]):
self.world.dirtyGroup.add(self)
self.movement = vector
def getMovement(self):
"""Return movement as a pair"""
return self.movement
def setStop(self):
"""Set movement to 0"""
self.setMovement([0,0])
def setY(self, y):
"""Set y-component of movement"""
self.movement[1] = y
def setX(self, x):
"""Set x-component of movement"""
self.movement[0] = x
def getX(self):
"""Get x-component of movement"""
return self.movement[0]
def getY(self):
"""Set y-component of movement"""
return self.movement[1]
def setPosition(self, pair):
"""Set x and y coords of Widget"""
self.rect.topleft = pair
def getPosition(self):
"""Get x and y coords of Widget"""
return self.rect.topleft
def update(self):
"""Move sprite according to its movement vector"""
# Widget needs to be animated
if (len(self.frames) > 0):
if self.lastUpdate <= 0:
self.frameIndex = (self.frameIndex+1)%(len(self.frames))
self.image = self.frames[self.frameIndex]
self.lastUpdate = self.timeDelay
self.world.dirtyGroup.add(self)
else:
self.lastUpdate -= 1
elif self.getMovement() != [0, 0]:
self.lastRect = Rect(self.rect)
self.rect.move_ip(self.movement)
self.world.dirtyGroup.add(self)
def undoUpdate(self):
"""Widget returns to state prior to last update()"""
self.rect = self.lastRect
def getShadow(self):
shadow = Sprite()
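# Rect.move(0, 0) returns a copy, so the shadow holds a snapshot of the
# widget's previous rect rather than aliasing it.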
shadow.rect = self.lastRect.move(0,0)
return shadow
def getName(self):
"""Get name of Widget"""
return self.name
class WorldlessWidget(Widget):
def update(self):
"""Move sprite according to its movement vector"""
# Widget needs to be animated
if (len(self.frames) > 0):
if self.lastUpdate <= 0:
self.frameIndex = (self.frameIndex+1)%(len(self.frames))
self.image = self.frames[self.frameIndex]
self.lastUpdate = self.timeDelay
self.lastRect = Rect(self.rect)
self.rect.move_ip(self.movement)
|
mikedll/pybomber2
|
desktop/widget.py
|
widget.py
|
py
| 5,122 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pygame.Surface",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.Sprite",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "pygame.rect.Rect",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pygame.rect.Rect",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.Sprite",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pygame.rect.Rect",
"line_number": 185,
"usage_type": "call"
}
] |
18659256455
|
import asyncpg
from app.main import create_app
from app.settings import Settings
from tests.test_currencies import test_currencies_success
settings = Settings()
async def test_rate_success(aiohttp_client, loop):
currencies = await test_currencies_success(aiohttp_client, loop)
currency = currencies['results']['currencies'][0]['id']
client = await aiohttp_client(create_app)
conn = await asyncpg.connect(settings.dsn)
try:
resp = await client.get(f'/rate/{currency}/')
assert resp.status == 200
result = await resp.json()
assert 'current_rate' in result
assert 'avg_volume' in result
resp = await client.get(f'/rate/?id={currency}')
assert resp.status == 200
result = await resp.json()
assert 'current_rate' in result
assert 'avg_volume' in result
finally:
await conn.close()
async def test_rate_not_found(aiohttp_client, loop):
currency_name = 'BCH'
client = await aiohttp_client(create_app)
conn = await asyncpg.connect(settings.dsn)
try:
currency = await conn.fetchval('select id from currency where name=$1', currency_name)
await conn.execute('delete from currency where name=$1', currency_name)
resp = await client.get(f'/rate/{currency}/')
assert resp.status == 404
result = await resp.json()
assert 'error' in result
assert 'detail' in result
assert 'not found transaction' == result['detail']
finally:
await conn.close()
async def test_rate_error(aiohttp_client, loop):
currency_name = 'BCH'
client = await aiohttp_client(create_app)
resp = await client.get(f'/rate/{currency_name}/')
assert resp.status == 400
|
ridhid/test_aio
|
tests/test_rate.py
|
test_rate.py
|
py
| 1,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "app.settings.Settings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tests.test_currencies.test_currencies_success",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.main.create_app",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "asyncpg.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.main.create_app",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "asyncpg.connect",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "app.main.create_app",
"line_number": 52,
"usage_type": "argument"
}
] |
25875832480
|
#!/usr/bin/env python
# This is more of a work in progress, but this script will
# test the code for creating vespagrams with our curved wavefront correction.
import obspy
import numpy as np
import time
import matplotlib.pyplot as plt
import circ_array as c
from circ_beam import Vespagram_Lin, Vespagram_PWS, Baz_vespagram_PWS, Baz_vespagram_Lin
from array_plotting import plotting
# parameters
# phase of interest
phase = 'SKS'
phases = ['SKS','SKKS','ScS','Sdiff','sSKS','sSKKS','PS','SKKKS','pSKS']
# frequency band
fmin = 0.13
fmax = 0.26
st = obspy.read('./data/19970525/*SAC')
# get array metadata
event_time = c.get_eventtime(st)
geometry = c.get_geometry(st)
distances = c.get_distances(st,type='deg')
mean_dist = np.mean(distances)
stations = c.get_stations(st)
# get travel time information and define a window
Target_phase_times, time_header_times = c.get_predicted_times(st,phase)
avg_target_time = np.mean(Target_phase_times)
min_target = int(np.nanmin(Target_phase_times, axis=0))
max_target = int(np.nanmax(Target_phase_times, axis=0)) + 100
stime = event_time + min_target
etime = event_time + max_target
# trim the stream
# Normalise and cut seismogram around defined window
st = st.copy().trim(starttime=stime, endtime=etime)
st = st.normalize()
# get predicted slownesses and backazimuths
predictions = c.pred_baz_slow(
stream=st, phases=phases, one_eighty=True)
# find the line with the predictions for the phase of interest
row = np.where((predictions == phase))[0]
P, S, BAZ, PRED_BAZ_X, PRED_BAZ_Y, PRED_AZ_X, PRED_AZ_Y, DIST, TIME = predictions[row, :][0]
# make the box around the prediction to search over
smin=float(S)-2
smax=float(S)+6
s_step=0.1
# filter
st = st.filter('bandpass', freqmin=fmin, freqmax=fmax,
corners=4, zerophase=True)
# get the traces and phase traces
Traces = c.get_traces(st)
Phase_traces = c.get_phase_traces(st)
# get sampling rate
sampling_rate=st[0].stats.sampling_rate
# slowness vespagrams
vesp_lin = Vespagram_Lin(traces=Traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, baz=float(BAZ), smin=smin, smax=smax, s_space=s_step)
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(211)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_lin, ymin=smin, ymax=smax, y_space=s_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Slow - Lin", predictions=predictions, type='slow',
envelope=True)
vesp_pws = Vespagram_PWS(traces=Traces, phase_traces=Phase_traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, baz=float(BAZ), smin=smin, smax=smax, s_space=s_step, degree=2)
ax = fig.add_subplot(212)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_pws, ymin=smin, ymax=smax, y_space=s_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Slow - PWS", predictions=predictions, type='slow',
envelope=True)
plt.tight_layout()
plt.show()
# backazimuth vespagrams
bmin=float(BAZ)-30
bmax=float(BAZ)+30
b_step=0.05
vesp_lin = Baz_vespagram_Lin(traces=Traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, slow=float(S), bmin=bmin, bmax=bmax, b_space=b_step)
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(211)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_lin, ymin=bmin, ymax=bmax, y_space=b_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Baz - Lin", predictions=predictions, type='baz',
envelope=True)
vesp_pws = Baz_vespagram_PWS(traces=Traces, phase_traces=Phase_traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, slow=float(S), bmin=bmin, bmax=bmax, b_space=b_step, degree=2)
ax = fig.add_subplot(212)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_pws, ymin=bmin, ymax=bmax, y_space=b_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Baz - PWS", predictions=predictions, npeaks=5, type='baz',
envelope=True)
plt.tight_layout()
plt.show()
|
eejwa/Array_Seis_Circle
|
examples/Vespagram_test.py
|
Vespagram_test.py
|
py
| 4,213 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "obspy.read",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "circ_array.get_eventtime",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "circ_array.get_geometry",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "circ_array.get_distances",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "circ_array.get_stations",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "circ_array.get_predicted_times",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.nanmin",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "circ_array.pred_baz_slow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "circ_array.get_traces",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "circ_array.get_phase_traces",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "circ_beam.Vespagram_Lin",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "array_plotting.plotting",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "circ_beam.Vespagram_PWS",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "array_plotting.plotting",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "circ_beam.Baz_vespagram_Lin",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "array_plotting.plotting",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "circ_beam.Baz_vespagram_PWS",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "array_plotting.plotting",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
}
] |
30610778506
|
"""
Meta manager. Defines complex workflow in terms of lower level managers
For usage example see tests
"""
import re
from time import time
import logging
from collections import defaultdict, OrderedDict as odict
from copy import copy, deepcopy
import yaml
from shub_workflow.base import WorkFlowManager
from .utils import get_scheduled_jobs_specs
logger = logging.getLogger(__name__)
_STARTING_JOB_RE = re.compile("--starting-job(?:=(.+))?")
class GraphManager(WorkFlowManager):
jobs_graph = {}
base_failed_outcomes = ('failed', 'killed by oom', 'cancelled', 'cancel_timeout', 'memusage_exceeded',
'cancelled (stalled)')
def __init__(self):
self.__failed_outcomes = list(self.base_failed_outcomes)
# Ensure jobs are traversed in the same order as they went pending.
self.__pending_jobs = odict()
self.__running_jobs = odict()
self._available_resources = {} # map resource : amount
self._acquired_resources = defaultdict(list) # map resource : list of (job, amount)
self.__tasks = {}
super(GraphManager, self).__init__()
self.__start_time = defaultdict(time)
for task in self.configure_workflow() or ():
self._add_task(task)
@property
def description(self):
return f"Workflow manager for {self.name!r}"
def _add_task(self, task):
assert task.task_id not in self.jobs_graph,\
"Workflow inconsistency detected: task %s referenced twice." % task.task_id
self.jobs_graph[task.task_id] = task.as_jobgraph_dict()
self.__tasks[task.task_id] = task
for ntask in task.get_next_tasks():
self._add_task(ntask)
def configure_workflow(self):
raise NotImplementedError("configure_workflow() method need to be implemented.")
def on_start(self):
if not self.jobs_graph:
self.argparser.error('Jobs graph configuration is empty.')
if not self.args.starting_job and not self.args.resume_from_jobid:
self.argparser.error('You must provide either --starting-job or --resume-from-jobid.')
self._fill_available_resources()
ran_tasks = self._maybe_setup_resume()
self._setup_starting_jobs(ran_tasks)
self.workflow_loop_enabled = True
logger.info("Starting '%s' workflow", self.name)
def _get_starting_jobs_from_resumed_job(self):
starting_jobs = []
job = self.get_project().jobs.get(self.args.resume_from_jobid)
next_option_is_task = False
for option in job.metadata.get('job_cmd'):
if next_option_is_task:
starting_jobs.append(option)
next_option_is_task = False
else:
m = _STARTING_JOB_RE.match(option)
if m:
task = m.groups()[0]
if task:
starting_jobs.append(task)
else:
next_option_is_task = True
return starting_jobs
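# e.g. a resumed job_cmd of ['--starting-job=taskA', '--starting-job', 'taskB']
# yields ['taskA', 'taskB'].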
def _maybe_setup_resume(self):
ran_tasks = []
if self.args.resume_from_jobid:
# fill tasks job ids
logger.info("Will Resume from job (%s)", self.args.resume_from_jobid)
for _, name, jobid in get_scheduled_jobs_specs(self, [self.args.resume_from_jobid]):
mname, taskid = name.split('/')
assert mname == self.name, "Resuming from wrong manager job: %s" % self.args.resume_from_jobid
self.__tasks[taskid].append_jobid(jobid)
ran_tasks.append(taskid)
return ran_tasks
def _setup_starting_jobs(self, ran_tasks, candidates=None):
candidates = candidates or self.args.starting_job
if not candidates: # resuming
candidates = self._get_starting_jobs_from_resumed_job()
for taskid in candidates:
if taskid in ran_tasks:
logger.info("Task %s already done %s.", taskid, tuple(self.__tasks[taskid].get_scheduled_jobs()))
next_tasks = [t.task_id for t in self.__tasks[taskid].get_next_tasks()]
if next_tasks:
self._setup_starting_jobs(ran_tasks, next_tasks)
else:
self._add_initial_pending_job(taskid)
logger.info("Resuming at task %s", taskid)
def _fill_available_resources(self):
"""
Ensure there are enough starting resources so that every job
can run at some point.
"""
for job in self.jobs_graph.keys():
for required_resources in self.__tasks[job].get_required_resources():
for resource, req_amount in required_resources.items():
old_amount = self._available_resources.get(resource, 0)
if old_amount < req_amount:
logger.info("Increasing available resources count for %r"
" from %r to %r. Old value was not enough"
" for job %r to run.",
resource, old_amount, req_amount, job)
self._available_resources[resource] = req_amount
def get_job(self, job, pop=False):
if job not in self.jobs_graph:
self.argparser.error('Invalid job: %s. Available jobs: %s' % (job, repr(self.jobs_graph.keys())))
if pop:
return self.jobs_graph.pop(job)
return self.jobs_graph[job]
def _add_initial_pending_job(self, job):
wait_for = self.get_job(job).get('wait_for', [])
self._add_pending_job(job, wait_for=tuple(wait_for))
def _add_pending_job(self, job, wait_for=(), is_retry=False):
if job in self.args.skip_job:
return
if job in self.__tasks:
task = self.__tasks[job]
parallelization = task.get_parallel_jobs()
else:
task_id = self.get_job(job).get('origin', job)
task = self.__tasks[task_id]
parallelization = 1
if parallelization == 1:
self.__pending_jobs[job] = {
'wait_for': set(wait_for),
'is_retry': is_retry,
'wait_time': task.wait_time,
}
else:
# Split parallelized task into N parallel jobs.
basejobconf = self.get_job(job, pop=True)
for i in range(parallelization):
job_unit = "%s_%i" % (job, i)
job_unit_conf = deepcopy(basejobconf)
job_unit_conf['origin'] = job
job_unit_conf['index'] = i
for _, nextjobs in job_unit_conf.get('on_finish', {}).items():
if i != 0: # only job 0 will conserve finish targets
for nextjob in copy(nextjobs):
if nextjob != 'retry':
if nextjob in self.jobs_graph:
self.get_job(nextjob).setdefault('wait_for', []).append(job_unit)
if nextjob in self.__pending_jobs:
self.__pending_jobs[nextjob]['wait_for'].add(job_unit)
else:
for i in range(parallelization):
nextjobp = "%s_%i" % (job, i)
self.get_job(nextjobp).setdefault('wait_for', []).append(job_unit)
if nextjobp in self.__pending_jobs:
self.__pending_jobs[nextjobp]['wait_for'].add(job_unit)
nextjobs.remove(nextjob)
self.jobs_graph[job_unit] = job_unit_conf
self.__pending_jobs[job_unit] = {
'wait_for': set(wait_for),
'is_retry': is_retry,
'origin': job,
'wait_time': task.wait_time,
}
for other, oconf in self.jobs_graph.items():
if job in oconf.get('wait_for', []):
oconf['wait_for'].remove(job)
if other in self.__pending_jobs:
self.__pending_jobs[other]['wait_for'].discard(job)
for i in range(parallelization):
job_unit = "%s_%i" % (job, i)
oconf['wait_for'].append(job_unit)
if other in self.__pending_jobs:
self.__pending_jobs[other]['wait_for'].add(job_unit)
def add_argparser_options(self):
super(GraphManager, self).add_argparser_options()
self.argparser.add_argument('--jobs-graph', help='Define jobs graph_dict on command line', default='{}')
self.argparser.add_argument('--starting-job', action='append', default=[],
help='Set starting jobs. Can be given multiple times.')
self.argparser.add_argument('--only-starting-jobs', action='store_true',
help='If given, only run the starting jobs (don\'t follow on finish next jobs)')
self.argparser.add_argument('--comment', help='Can be used for differentiate command line and avoid scheduling '
'fail when a graph manager job is scheduled when another one with same option '
'signature is running. Doesn\'t do anything else.')
self.argparser.add_argument('--skip-job', default=[], action='append',
help='Skip given job. Can be given multiple times. Also next jobs for the skipped'
'one will be skipped.')
self.argparser.add_argument('--resume-from-jobid', help='Resume from the given graph manager jobid')
def parse_args(self):
args = super(GraphManager, self).parse_args()
self.jobs_graph = yaml.safe_load(args.jobs_graph) or deepcopy(self.jobs_graph)
if not self.name:
self.argparser.error('Manager name not set.')
return args
def workflow_loop(self):
logger.debug("Pending jobs: %r", self.__pending_jobs)
logger.debug("Running jobs: %r", self.__running_jobs)
logger.debug("Available resources: %r", self._available_resources)
logger.debug("Acquired resources: %r", self._acquired_resources)
self.check_running_jobs()
if self.__pending_jobs:
self.run_pending_jobs()
elif not self.__running_jobs:
return False
return True
def run_job(self, job, is_retry=False):
task = self.__tasks.get(job)
if task is not None:
return task.run(self, is_retry)
jobconf = self.get_job(job)
task = self.__tasks.get(jobconf['origin'])
if task is not None:
idx = jobconf['index']
return task.run(self, is_retry, index=idx)
def _must_wait_time(self, job):
status = self.__pending_jobs[job]
if status['wait_time'] is not None:
wait_time = status['wait_time'] - time() + self.__start_time[job]
if wait_time > 0:
logger.info("Job %s must wait %d seconds for running", job, wait_time)
return True
return False
def run_pending_jobs(self):
"""Try running pending jobs.
Normally, only jobs that have no outstanding dependencies are started.
If all pending jobs have outstanding dependencies, try to start one job
ignoring unknown tasks, i.e. those that are not currently pending.
If none of the pending jobs can be started either way, it means
there's a dependency cycle, in this case an error is raised.
"""
# Normal mode: start jobs without dependencies.
for job in sorted(self.__pending_jobs.keys()):
if len(self.__running_jobs) >= self.max_running_jobs:
break
status = self.__pending_jobs[job]
job_can_run = not status['wait_for'] and not self._must_wait_time(job) and self._try_acquire_resources(job)
if job_can_run:
try:
jobid = self.run_job(job, status['is_retry'])
except:
self._release_resources(job)
raise
self.__pending_jobs.pop(job)
self.__running_jobs[job] = jobid
if not self.__pending_jobs or self.__running_jobs or \
any(status['wait_time'] is not None for status in self.__pending_jobs.values()):
return
# At this point, there are pending jobs, but none were started because
# of dependencies, try "skip unknown deps" mode: start one job that
# only has "unseen" dependencies to try to break the "stalemate."
origin_job = None
for job in sorted(self.__pending_jobs.keys()):
if len(self.__running_jobs) >= self.max_running_jobs:
break
status = self.__pending_jobs[job]
job_can_run = (
all(w not in self.__pending_jobs for w in status['wait_for']) and
(not origin_job or status.get('origin') == origin_job) and
self._try_acquire_resources(job))
origin_job = status.get('origin')
if job_can_run:
try:
jobid = self.run_job(job, status['is_retry'])
except:
self._release_resources(job)
raise
self.__pending_jobs.pop(job)
self.__running_jobs[job] = jobid
if not origin_job and self.__running_jobs:
return
if self.__running_jobs:
return
# Nothing helped, all pending jobs wait for each other somehow.
raise RuntimeError("Job dependency cycle detected: %s" % ', '.join(
'%s waits for %s' % (
job, sorted(self.__pending_jobs[job]['wait_for']))
for job in sorted(self.__pending_jobs.keys())))
def check_running_jobs(self):
for job, jobid in list(self.__running_jobs.items()):
outcome = self.is_finished(jobid)
if outcome is not None:
logger.info('Job "%s/%s" (%s) finished', self.name, job, jobid)
for st in self.__pending_jobs.values():
st['wait_for'].discard(job)
for conf in self.jobs_graph.values():
if job in conf.get('wait_for', []):
conf['wait_for'].remove(job)
for nextjob in self._get_next_jobs(job, outcome):
if nextjob == 'retry':
jobconf = self.get_job(job)
retries = jobconf.get('retries', 0)
if retries > 0:
self._add_pending_job(job, is_retry=True)
jobconf['retries'] -= 1
logger.warning('Will retry job %s (outcome: %s, number of retries left: %s)',
job, outcome, jobconf['retries'])
elif nextjob in self.__pending_jobs:
logger.error('Job %s already pending', nextjob)
else:
wait_for = self.get_job(nextjob).get('wait_for', [])
self._add_pending_job(nextjob, wait_for)
self._release_resources(job)
self.__running_jobs.pop(job)
else:
logger.info("Job %s (%s) still running", job, jobid)
def _try_acquire_resources(self, job):
result = True
task_id = self.get_job(job).get('origin', job)
for required_resources in self.__tasks[task_id].get_required_resources(partial=True):
for resource, req_amount in required_resources.items():
if self._available_resources[resource] < req_amount:
result = False
break
else:
for resource, req_amount in required_resources.items():
self._available_resources[resource] -= req_amount
self._acquired_resources[resource].append((job, req_amount))
return True
return result
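# e.g. with self._available_resources == {'slot': 2}, a job requiring
# {'slot': 1} acquires it and leaves {'slot': 1}; a further job requiring
# {'slot': 2} is then refused until _release_resources() runs.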
def _release_resources(self, job):
for res, acquired in self._acquired_resources.items():
for rjob, res_amount in acquired:
if rjob == job:
self._available_resources[res] += res_amount
self._acquired_resources[res].remove((rjob, res_amount))
def _get_next_jobs(self, job, outcome):
if self.args.only_starting_jobs:
return []
on_finish = self.get_job(job).get('on_finish', {})
if outcome in on_finish:
nextjobs = on_finish[outcome]
elif outcome in self.__failed_outcomes:
nextjobs = on_finish.get('failed', [])
else:
nextjobs = on_finish.get('default', [])
return nextjobs
@property
def pending_jobs(self):
return self.__pending_jobs
|
hermit-crab/shub-workflow
|
shub_workflow/graph/__init__.py
|
__init__.py
|
py
| 17,269 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "shub_workflow.base.WorkFlowManager",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "utils.get_scheduled_jobs_specs",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 249,
"usage_type": "call"
}
] |
18028682844
|
from pyfiglet import Figlet
import os
from shutil import copyfile
import shutil
import sqlite3
import subprocess
import winreg
import base64
import psutil
import datetime
import socket
import ctypes
def init():
f = Figlet(font='slant')
print(f.renderText('Reboot'))
print("This program is Artifact collecter\n")
def showOptions():
print("\n==========Options==========")
print("1) Memory Dump")
print("2) Registry Hive")
print("3) System Info")
print("4) System Audit Policy")
print("5) Group Policy")
print("6) Event Viewer Log")
print("7) Services Log")
print("8) Hosts Data")
print("9) SRUM (System Resource Utilization Monitor)")
print("10) Environment Variables")
print("11) Patch List")
print("12) Process List")
print("13) Opened Port")
print("14) IP Config Info")
print("15) ARP Info")
print("16) Net BIOS")
print("17) Opened Handle")
print("18) Task Schedule Info")
print("19) System Logon Info")
print("20) UserAssist")
print("21) AutoRun")
print("22) Registry User")
print("23) Internet Browser History")
print("24) Recycle Bin")
print("25) LNK File")
print("26) PowerShell Log File")
print("27) Registerd Service Info")
print("28) Recent Activity Info")
print("29) Prefetch")
print("30) NTFS Artifact")
print("777) ALL")
print("0) exit program")
def registryHive():
print("\n==========Registry Hive File==========")
current_directory = os.getcwd()
export_registry_hive(current_directory, winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE")
export_registry_hive(current_directory, winreg.HKEY_CURRENT_USER, r"Software")
def export_registry_hive(output_directory, hive_name, hive_path):
registry_folder = os.path.join(output_directory, "RegistryHive")
if not os.path.exists(registry_folder):
os.makedirs(registry_folder)
try:
with winreg.ConnectRegistry(None, hive_name) as hive:
hive_output_file = os.path.join(registry_folder, f"{hive_name}_hive.txt")
with open(hive_output_file, 'w', encoding='utf-16') as file:
def export_subkeys(key, indent=""):
for i in range(winreg.QueryInfoKey(key)[0]):
subkey_name = winreg.EnumKey(key, i)
subkey_path = os.path.join(hive_path, subkey_name)
file.write(f"{indent}[{subkey_path}]\n")
with winreg.OpenKey(key, subkey_name) as subkey:
export_subkeys(subkey, indent + " ")
export_subkeys(hive)
print(f"{hive_name} hive exported to {hive_output_file}")
except Exception as e:
print(f"Error: {e}")
def systemInfo():
print("\n==========System Info File==========")
subprocess.run(['systeminfo'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, encoding='utf-8')
output_directory = os.getcwd()
system_info_folder = os.path.join(output_directory, "SystemInfo")
if not os.path.exists(system_info_folder):
os.makedirs(system_info_folder)
try:
system_info_output_file = os.path.join(system_info_folder, "system_info.txt")
with open(system_info_output_file, 'w', encoding='utf-8') as file:
file.write(subprocess.getoutput('systeminfo'))
print(f"System info exported to {system_info_output_file}")
except Exception as e:
print(f"Error: {e}")
def systemAudit():
print("\n==========System Audit Policy File==========")
subprocess.run(['auditpol', '/get', '/category:*'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, encoding='utf-8')
output_directory = os.getcwd()
audit_policy_folder = os.path.join(output_directory, "AuditPolicy")
if not os.path.exists(audit_policy_folder):
os.makedirs(audit_policy_folder)
try:
audit_policy_output_file = os.path.join(audit_policy_folder, "audit_policy_info.txt")
with open(audit_policy_output_file, 'w', encoding='utf-8') as file:
file.write(subprocess.getoutput('auditpol /get /category:*'))
print(f"Audit policy info exported to {audit_policy_output_file}")
except Exception as e:
print(f"Error: {e}")
def groupPolicy():
print("\n==========Group Policy File==========")
subprocess.run(['gpresult', '/R'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, encoding='utf-8')
output_directory = os.getcwd()
group_policy_folder = os.path.join(output_directory, "GroupPolicy")
if not os.path.exists(group_policy_folder):
os.makedirs(group_policy_folder)
try:
group_policy_output_file = os.path.join(group_policy_folder, "group_policy_info.txt")
with open(group_policy_output_file, 'w', encoding='utf-8') as file:
file.write(subprocess.getoutput('gpresult /R'))
print(f"Group policy info exported to {group_policy_output_file}")
except Exception as e:
print(f"Error: {e}")
def eventLog():
print("\n==========Event Viewer Log File==========")
output_directory = os.getcwd()
event_logs_folder = os.path.join(output_directory, "EventLogs")
if not os.path.exists(event_logs_folder):
os.makedirs(event_logs_folder)
try:
# Fetch the System event log contents
system_log_output_file = os.path.join(event_logs_folder, "system_event_log.txt")
subprocess.run(['wevtutil', 'qe', 'System', '/f:text', '/c:1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, encoding='utf-8')
with open(system_log_output_file, 'w', encoding='utf-8') as file:
file.write(subprocess.getoutput(f'wevtutil qe System /f:text /c:1'))
print(f"System event log exported to {system_log_output_file}")
# Fetch the Application event log contents
application_log_output_file = os.path.join(event_logs_folder, "application_event_log.txt")
subprocess.run(['wevtutil', 'qe', 'Application', '/f:text', '/c:1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, encoding='utf-8')
with open(application_log_output_file, 'w', encoding='utf-8') as file:
file.write(subprocess.getoutput(f'wevtutil qe Application /f:text /c:1'))
print(f"Application event log exported to {application_log_output_file}")
except Exception as e:
print(f"Error: {e}")
def serviceLog():
print("\n==========Service Log File==========")
output_directory = os.getcwd()
service_log_folder = os.path.join(output_directory, "ServiceLog")
if not os.path.exists(service_log_folder):
os.makedirs(service_log_folder)
try:
# Fetch the service log and save it to a file
service_log_output_file = os.path.join(service_log_folder, "service_log.txt")
subprocess.run(['net', 'start'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, encoding='utf-8')
with open(service_log_output_file, 'w', encoding='utf-8') as file:
file.write(subprocess.getoutput('net start'))
print(f"Service log exported to {service_log_output_file}")
except Exception as e:
print(f"Error: {e}")
def hostsData():
print("\n==========Hosts Data File==========")
output_directory = os.getcwd()
hosts_folder = os.path.join(output_directory, "Hosts")
if not os.path.exists(hosts_folder):
os.makedirs(hosts_folder)
try:
hosts_file_path = r"C:\Windows\System32\drivers\etc\hosts" # Hosts 파일 경로
hosts_output_file = os.path.join(hosts_folder, "hosts.txt")
with open(hosts_file_path, 'r', encoding='utf-8') as input_file, open(hosts_output_file, 'w', encoding='utf-8') as output_file:
hosts_content = input_file.read()
output_file.write(hosts_content)
print(f"Hosts file exported to {hosts_output_file}")
except Exception as e:
print(f"Error: {e}")
def srum():
print("\n==========SRUM File==========")
output_directory = os.getcwd()
srum_folder = os.path.join(output_directory, "SRUM")
if not os.path.exists(srum_folder):
os.makedirs(srum_folder)
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\SRUM\Extensions", 0, winreg.KEY_READ) as key:
i = 0
while True:
try:
subkey_name = winreg.EnumKey(key, i)
subkey_path = os.path.join(r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\SRUM\Extensions", subkey_name)
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey_path, 0, winreg.KEY_READ) as subkey:
j = 0
while True:
try:
value_name, srum_data, _ = winreg.EnumValue(subkey, j)
# Save each value to a file (opened in binary or text mode as appropriate)
srum_output_file = os.path.join(srum_folder, f"{subkey_name}_{value_name}.txt")
with open(srum_output_file, 'wb' if isinstance(srum_data, bytes) else 'w') as file:
if isinstance(srum_data, bytes):
file.write(srum_data)
else:
file.write(str(srum_data))
print(f"SRUM data from {subkey_name}/{value_name} exported to {srum_output_file}")
j += 1
except OSError as e:
break
i += 1
except OSError as e:
break
if i == 0:
print("No SRUM data found.")
except Exception as e:
print(f"Error: {e}")
def environmentVar():
print("\n==========Envionment Variable File==========")
output_directory = os.getcwd()
environ_var_folder = os.path.join(output_directory, "EnvironmentVar")
if not os.path.exists(environ_var_folder):
os.makedirs(environ_var_folder)
output_file = os.path.join(environ_var_folder, "environment_variables.txt")
try:
with open(output_file, 'w') as file:
for key, value in os.environ.items():
file.write(f"{key} = {value}\n")
print(f"Environment variables exported to {output_file}")
except Exception as e:
print(f"Error: {e}")
def patchList():
print("\n==========Patch List File==========")
output_directory = os.getcwd()
patch_list_folder = os.path.join(output_directory, "PatchList")
if not os.path.exists(patch_list_folder):
os.makedirs(patch_list_folder)
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\Packages", 0, winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as key:
i = 0
while True:
try:
package_name = winreg.EnumKey(key, i)
patch_list_file = os.path.join(patch_list_folder, f"{package_name}.txt")
with open(patch_list_file, 'w') as file:
file.write(package_name)
i += 1
except OSError as e:
break
if i > 0:
print(f"Patch list exported to {patch_list_folder}")
else:
print("No patch list information found.")
except Exception as e:
print(f"Error: {e}")
def processList():
print("\n==========Process List File==========")
file_path = os.getcwd()
file_path = os.path.join(file_path, "ProcessList")
if not os.path.exists(file_path):
os.makedirs(file_path)
file_path = os.path.join(file_path, "process_list.txt")
process_info_list = []
for process in psutil.process_iter(attrs=['pid', 'name', 'username', 'memory_info']):
info = process.info
pid = info['pid']
name = info['name']
username = info['username']
memory = info['memory_info'].rss # Resident Set Size: memory usage
process_info = f"PID: {pid}, Process Name: {name}, User: {username}, Memory: {memory} bytes"
process_info_list.append(process_info)
with open(file_path, 'w') as f:
for process in process_info_list:
f.write(process + '\n')
print(f"실행 중인 프로세스 정보가 {file_path} 에 저장되었습니다.")
def openPort():
print("\n==========Open Port File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "OpenPort")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
file_path = os.path.join(destination_folder, "open_ports.txt")
open_ports = []
for conn in psutil.net_connections(kind='inet'):
laddr, raddr, status, pid = conn.laddr, conn.raddr, conn.status, conn.pid
if raddr:
open_ports.append(f"{laddr} <--> {raddr} {status} {pid}")
else:
open_ports.append(f"{laddr} {status} {pid}")
with open(file_path, 'w') as f:
for port in open_ports:
f.write(f"{port}\n")
print(f"Open port information has been saved to {file_path}")
def IPConfigInfo():
print("\n==========IP Config File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "IPConfig")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
ip_info = {}
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
ip_info["Hostname"] = hostname
ip_info["Local IP"] = local_ip
interfaces = psutil.net_if_addrs()
for interface, addrs in interfaces.items():
ip_info[interface] = []
for addr in addrs:
ip_info[interface].append(str(addr))
filename = os.path.join(destination_folder, f"ip_info_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
with open(filename, 'w') as f:
for key, value in ip_info.items():
f.write(f"{key}: {value}\n")
print(f"IP 설정 정보가 {filename} 에 저장되었습니다.")
def arpInfo():
print("\n==========ARP Info File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "ARPInfo")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
try:
# Run 'arp -a' and capture its output.
arp_output = subprocess.check_output("arp -a", shell=True, stderr=subprocess.STDOUT, text=True)
except subprocess.CalledProcessError as e:
arp_output = f"An error occurred while trying to fetch ARP info: {str(e)}"
# Append the current time to the filename to make it unique.
filename = os.path.join(destination_folder, f"arp_info_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
# Save the ARP info to a .txt file.
with open(filename, 'w') as f:
f.write(arp_output)
print(f"ARP info saved to {filename}")
def netBIOS():
print("\n==========Net BIOS File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "NetBIOS")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
try:
# Run 'nbtstat -n' and capture its output.
netbios_output = subprocess.check_output("nbtstat -n", shell=True, stderr=subprocess.STDOUT, text=True)
except subprocess.CalledProcessError as e:
netbios_output = f"An error occurred while trying to fetch NetBIOS info: {str(e)}"
# Append the current time to the filename to make it unique.
filename = os.path.join(destination_folder, f"netbios_info_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
# Save the NetBIOS info to a .txt file.
with open(filename, 'w') as f:
f.write(netbios_output)
print(f"NetBIOS info saved to {filename}")
def openedHandle():
print("\n==========Opened Handle File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "OpenHandle")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
processes = []
for proc in psutil.process_iter(['pid', 'name', 'open_files']):
processes.append(proc.info)
# Append the current time to the filename to make it unique.
filename = os.path.join(destination_folder, f"handle_info_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
# Save the handle info to a .txt file.
with open(filename, 'w') as f:
for proc in processes:
f.write(f"PID: {proc['pid']}, Name: {proc['name']}\n")
if proc['open_files']:
for open_file in proc['open_files']:
f.write(f"\t{open_file}\n")
print(f"Open handle info saved to {filename}")
def taskSchedule():
print("\n==========Task Schedule File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "JobSchedule")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
try:
# Run 'schtasks' and capture its output.
output = subprocess.check_output("schtasks /query /fo LIST", shell=True, stderr=subprocess.STDOUT, text=True)
except subprocess.CalledProcessError as e:
output = f"An error occurred while trying to fetch task scheduler info: {str(e)}"
# Append the current time to the filename to make it unique.
filename = os.path.join(destination_folder, f"task_scheduler_info_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
# Save the task scheduler info to a .txt file.
with open(filename, 'w') as f:
f.write(output)
print(f"Task schedule info saved to {filename}")
def systemLogon():
print("\n==========System Logon File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "SystemLogon")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
file_path = os.path.join(destination_folder, "logon_history.txt")
query = "wevtutil qe Security /q:\"*[System[Provider[@Name='Microsoft-Windows-Security-Auditing'] and (EventID=4624)]]\" /c:1 /rd:true /f:text"
result = subprocess.run(query, capture_output=True, text=True, shell=True)
with open(file_path, 'w') as f:
f.write(result.stdout)
print(f"시스템 로그온 정보가 {file_path} 에 저장되었습니다.")
def memoryDump():
print("\n==========MemoryDump File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "MemoryDump")
# "MemoryDump" 폴더 생성 후 저장
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# 메모리 덤프 파일을 저장할 디렉토리 경로 지정
dump_directory = destination_folder
# psexec.exe 경로 (psexec을 다운로드한 경로로 설정)
psexec_path = os.getcwd()
psexec_path = os.path.join(psexec_path, "PSTools")
psexec_path = os.path.join(psexec_path, "PsExec.exe")
# procdump.exe 경로 (procdump를 설치한 경로로 설정)
procdump_path = os.getcwd()
procdump_path = os.path.join(procdump_path, "Procdump")
procdump_path = os.path.join(procdump_path, "procdump.exe")
# 현재 실행 중인 모든 프로세스 가져오기
running_processes = list(psutil.process_iter(attrs=['pid', 'name']))
# 실행 중인 모든 프로세스에 대해 메모리 덤프 생성
for process in running_processes:
process_name = process.info['name']
process_pid = process.info['pid']
dump_file_path = os.path.join(dump_directory, f"{process_name}_{process_pid}_dump.dmp")
# procdump을 사용하여 메모리 덤프 실행
cmd = [procdump_path, f"-ma", process_name, dump_file_path]
try:
subprocess.run(cmd, check=True)
print(f"{process_name} 프로세스의 메모리 덤프 생성 완료:", dump_file_path)
except subprocess.CalledProcessError as e:
print(f"{process_name} 프로세스의 메모리 덤프 생성 중 오류 발생:", e)
print("모든 프로세스의 메모리 덤프 생성 완료")
def userAssist():
print("\n==========UserAssist File==========")
keys_to_copy = [
# Subkey recording executable launches
r"Software\Microsoft\Windows\CurrentVersion\Explorer\UserAssist\{CEBFF5CD-ACE2-4F4F-9178-9926F41749EA}\Count",
# Subkey recording shortcut (.lnk) launches
r"Software\Microsoft\Windows\CurrentVersion\Explorer\UserAssist\{F4E57C4B-2036-45F0-A9AB-443BCFE33D9F}\Count"
]
current_directory = os.getcwd()
destination_folder = os.path.join(current_directory, "UserAssist")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
try:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, keys_to_copy[0], 0, winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as key:
userAssistRegistry(key, destination_folder, 1)
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, keys_to_copy[1], 0, winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as key:
userAssistRegistry(key, destination_folder, 2)
except Exception as e:
print(f"Error: {e}")
def sanitize_filename(filename):
# Replace characters that are not valid in file names
invalid_chars = ['\\', '/', ':', '*', '?', '"', '<', '>', '|', '.', '&', '!', '@', '#', '$', '%', '^', '(', ')', '[', ']', '{', '}', '+', '=', ',', ';', '`', "'", '~', ' ']
for char in invalid_chars:
filename = filename.replace(char, '_')
# Limit the file name to at most 100 characters
return filename[:100]
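# e.g. sanitize_filename(r'HKCU\Software\Run:entry?1') -> 'HKCU_Software_Run_entry_1'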
def userAssistRegistry(subkey, output_directory, num):
try:
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if num == 1:
subkey_name = str(winreg.QueryInfoKey(subkey)[0])
subkey_path = os.path.join(output_directory, "CEBFF5CD-ACE2-4F4F-9178-9926F41749EA")
else:
subkey_name = str(winreg.QueryInfoKey(subkey)[0])
subkey_path = os.path.join(output_directory, "F4E57C4B-2036-45F0-A9AB-443BCFE33D9F")
if not os.path.exists(subkey_path):
os.makedirs(subkey_path)
i = 0
while True:
try:
value_name, value_data, _ = winreg.EnumValue(subkey, i)
value_output_file = os.path.join(subkey_path, f"{sanitize_filename(value_name)}.txt")
# Create intermediate directories if they do not exist
value_output_dir = os.path.dirname(value_output_file)
if not os.path.exists(value_output_dir):
os.makedirs(value_output_dir)
with open(value_output_file, 'w') as file:
file.write(str(value_data)) # write value_data converted to a string
i += 1
except OSError as e:
print(f"Error while processing subkey {subkey_name}: {e}")
break
print(f"Data from subkey {subkey_name} has been saved to {subkey_path}")
except Exception as e:
print(f"Error: {e}")
def autoRun():
print("\n==========AutoRun File==========")
output_directory = os.getcwd()
autorun_folder = os.path.join(output_directory, "AutoRun")
if not os.path.exists(autorun_folder):
os.makedirs(autorun_folder)
try:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as key:
values = {}
i = 0
while True:
try:
value_name, value_data, _ = winreg.EnumValue(key, i)
values[value_name] = value_data
i += 1
except OSError as e:
break
if values:
output_file = os.path.join(autorun_folder, "autorun_values.txt")
with open(output_file, 'w') as file:
for name, data in values.items():
file.write(f"{name} = {data}\n")
print(f"AutoRun values exported to {output_file}")
else:
print("No AutoRun values found.")
except Exception as e:
print(f"Error: {e}")
def registryUser():
print("\n==========Registry User File==========")
output_directory = os.getcwd()
registry_folder = os.path.join(output_directory, "AllUserRegistry")
if not os.path.exists(registry_folder):
os.makedirs(registry_folder)
try:
with winreg.ConnectRegistry(None, winreg.HKEY_USERS) as users_hive:
with winreg.OpenKey(users_hive, None) as users_key:
num_subkeys = winreg.QueryInfoKey(users_key)[0]
for i in range(num_subkeys):
user_sid = winreg.EnumKey(users_key, i)
user_hive_path = f"{user_sid}\\Software" # 사용자 하이브 경로
with winreg.ConnectRegistry(None, winreg.HKEY_USERS) as user_hive:
with winreg.CreateKey(user_hive, user_hive_path) as user_key:
# 사용자 레지스트리 정보를 파일로 저장
user_output_file = os.path.join(registry_folder, f"{user_sid}_registry.txt")
with open(user_output_file, 'w', encoding='utf-16') as file:
def export_subkeys(key, indent=""):
for j in range(winreg.QueryInfoKey(key)[0]):
subkey_name = winreg.EnumKey(key, j)
subkey_path = os.path.join(user_hive_path, subkey_name)
file.write(f"{indent}[{subkey_path}]\n")
with winreg.OpenKey(key, subkey_name) as subkey:
export_subkeys(subkey, indent + " ")
export_subkeys(user_key)
print(f"{user_sid} registry exported to {user_output_file}")
except Exception as e:
print(f"Error: {e}")
def export_registry_key(reg_file, hkey, key_path):
try:
key = winreg.OpenKey(hkey, key_path)
for i in range(winreg.QueryInfoKey(key)[0]):
sub_key_name = winreg.EnumKey(key, i)
sub_key_path = os.path.join(key_path, sub_key_name)
reg_file.write(f'\n[{sub_key_path}]\n')
export_registry_key(reg_file, hkey, sub_key_path)
for i in range(winreg.QueryInfoKey(key)[1]):
value_name, value_data, value_type = winreg.EnumValue(key, i)
if value_type == winreg.REG_SZ:
reg_file.write(f'"{value_name}"="{value_data}"\n')
elif value_type == winreg.REG_DWORD:
reg_file.write(f'"{value_name}"=dword:{value_data:08X}\n')
# Handling for other registry value types could be added here
except Exception as e:
pass
def save_browser_history():
print("\n==========Browser History File==========")
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "BrowserHistory")
# "MemoryDump" 폴더 생성 후 저장
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# 크롬 브라우저의 기록 데이터베이스 경로
chrome_history_path = os.path.expanduser('~') + '\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\History'
# 복사할 임시 파일 경로
temp_db_path = os.path.join(destination_folder, 'temp_history')
# 기록 데이터베이스 파일 복사
copyfile(chrome_history_path, temp_db_path)
# 데이터베이스 연결
connection = sqlite3.connect(temp_db_path)
cursor = connection.cursor()
# 방문 기록 가져오기
cursor.execute("SELECT title, url, last_visit_time FROM urls")
history = cursor.fetchall()
# 파일로 저장
history_file_path = os.path.join(destination_folder, 'browser_history.txt')
with open(history_file_path, 'w', encoding='utf-8') as file:
for item in history:
title, url, timestamp = item
time_formatted = str(timestamp)
file.write(f"Title: {title}\nURL: {url}\nTimestamp: {time_formatted}\n\n")
# 연결 종료 및 임시 데이터베이스 파일 삭제
cursor.close()
connection.close()
os.remove(temp_db_path)
print("브라우저 기록이 'Browser' 폴더에 저장되었습니다.")
def recycleBin():
print("\n==========Recycle Bin File==========")
# Create the 'RecycleBin' folder
destination_folder = os.path.join(os.getcwd(), 'RecycleBin')
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
try:
# Run the PowerShell script
script_path = os.path.join(os.path.dirname(__file__), 'copy_recycle.ps1')
command = ["powershell", "-ExecutionPolicy", "Bypass", "-File", script_path]
subprocess.run(command, shell=True, check=True)
print("Recycle bin contents copied to the 'RecycleBin' folder.")
except Exception as e:
print(f"An error occurred: {e}")
def lnkFile():
print("\n==========LNK File==========")
# Create the LNK folder and save into it
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "LNK")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# Location of the .lnk files
source_folder = os.path.expanduser("~") + "\\AppData\\Roaming\\Microsoft\\Windows\\Recent"
# file copy
for root, dirs, files in os.walk(source_folder):
for file in files:
source_file_path = os.path.join(root, file)
destination_file_path = os.path.join(destination_folder, file)
try:
shutil.copy2(source_file_path, destination_file_path)
print(f"Copied: {file}")
except Exception as e:
print(f"Copy failed: {file}, error: {e}")
def PowerShellLogFile():
print("\n==========PowerShell Log File==========")
# Create the PowerShellLog folder and save into it
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "PowerShellLog")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# Location of the PowerShell PSReadLine history files
source_folder = os.path.expanduser("~") + "\\AppData\\Roaming\\Microsoft\\Windows\\PowerShell\\PSReadLine"
# file copy
for root, dirs, files in os.walk(source_folder):
for file in files:
source_file_path = os.path.join(root, file)
destination_file_path = os.path.join(destination_folder, file)
try:
shutil.copy2(source_file_path, destination_file_path)
print(f"Copied: {file}")
except Exception as e:
print(f"Copy failed: {file}, error: {e}")
def registeredService():
print("\n==========Registered Service File==========")
# Create the RegisteredService folder and save into it
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "RegisteredService")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
filename = os.path.join(destination_folder, f"service_info_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
try:
# Run 'sc query' to fetch the service info.
output = subprocess.check_output("sc query", shell=True, text=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
output = f"An error occurred while trying to fetch service info: {str(e)}"
# Save the service info to a .txt file.
with open(filename, 'w') as f:
f.write(output)
print(f"Registered service info saved to {filename}")
def recentActivity():
print("\n==========Recent Activity File==========")
# Create the RecentActivity folder and save into it
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "RecentActivity")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# Get the path of the Recent Items folder.
recent_folder = os.path.join(os.environ['USERPROFILE'], r'AppData\Roaming\Microsoft\Windows\Recent')
recent_items = []
# List every file and folder inside it.
for item in os.listdir(recent_folder):
item_path = os.path.join(recent_folder, item)
item_stat = os.stat(item_path)
# Get the file's last access time.
last_access_time = datetime.datetime.fromtimestamp(item_stat.st_atime).strftime('%Y-%m-%d %H:%M:%S')
recent_items.append(f"{item}: Last accessed at {last_access_time}")
# Append the current time to the filename to make it unique.
filename = os.path.join(destination_folder, f"recent_activity_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")
# Save the recent activity info to a .txt file.
with open(filename, 'w') as f:
for info in recent_items:
f.write(f"{info}\n")
print(f"Recent activity info saved to {filename}")
def prefetch():
print("\n==========Prefetch File==========")
# Create the Prefetch folder and save into it
current_directory = os.path.dirname(os.path.abspath(__file__))
destination_folder = os.path.join(current_directory, "Prefetch")
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# Path of the Prefetch folder
prefetch_path = r"C:\Windows\Prefetch"
# Collect the list of .pf files in the Prefetch folder
try:
prefetch_files = [f for f in os.listdir(prefetch_path) if f.endswith('.pf')]
except PermissionError:
print("Administrator privileges are required.")
exit()
for prefetch_file in prefetch_files:
source_file = os.path.join(prefetch_path, prefetch_file)
destination_file = os.path.join(destination_folder, prefetch_file)
shutil.copy(source_file, destination_file)
print(f"Prefetch file saved to {destination_file}")
def NTFS():
print("\n==========NTFS Artifact File==========")
script_dir = os.path.dirname(os.path.abspath(__file__))
ntfs_folder = os.path.join(script_dir, 'NTFS')
os.makedirs(ntfs_folder, exist_ok=True) # Create the 'NTFS' folder (ignored if it already exists)
# NTFS system files to query
ntfs_files = ["$MFT", "$LogFile", "$Extend\\$UsnJrnl:$J"]
for ntfs_file in ntfs_files:
source_file = "C:\\" + ntfs_file
destination_file_name = ntfs_file.replace('$', '').replace(':', '').replace('\\', '_') + ".txt"
destination_file = os.path.join(ntfs_folder, destination_file_name)
command = "fsutil file queryextents {} > {}".format(source_file, destination_file)
try:
subprocess.run(command, shell=True, check=True, stderr=subprocess.PIPE)
print("Successfully queried extents for {}.".format(source_file))
except subprocess.CalledProcessError as e:
print("Failed to query {}: {}".format(source_file, e.stderr.decode('utf-8')))
print(f"NTFS artifact info saved to {destination_file}")
def main():
init()
while(True):
showOptions()
options = input("[Select Option] : ")
if options == "0":
print("Good Bye!")
exit()
elif options == "1":
memoryDump()
elif options == "2":
registryHive()
elif options == "3":
systemInfo()
elif options == "4":
systemAudit()
elif options == "5":
groupPolicy()
elif options == "6":
eventLog()
elif options == "7":
serviceLog()
elif options == "8":
hostsData()
elif options == "9":
srum()
elif options == "10":
environmentVar()
elif options == "11":
patchList()
elif options == "12":
processList()
elif options == "13":
openPort()
elif options == "14":
IPConfigInfo()
elif options == "15":
arpInfo()
elif options == "16":
netBIOS()
elif options == "17":
openedHandle()
elif options == "18":
taskSchedule()
elif options == "19":
systemLogon()
elif options == "20":
userAssist()
elif options == "21":
autoRun()
elif options == "22":
registryUser()
elif options == "23":
save_browser_history()
elif options == "24":
recycleBin()
elif options == "25":
lnkFile()
elif options == "26":
PowerShellLogFile()
elif options == "27":
registeredService()
elif options == "28":
recentActivity()
elif options == "29":
prefetch()
elif options == "30":
NTFS()
elif options == "777":
memoryDump()
registryHive()
systemInfo()
systemAudit()
groupPolicy()
eventLog()
serviceLog()
hostsData()
srum()
environmentVar()
patchList()
processList()
openPort()
IPConfigInfo()
arpInfo()
netBIOS()
openedHandle()
taskSchedule()
systemLogon()
userAssist()
autoRun()
registryUser()
save_browser_history()
recycleBin()
lnkFile()
PowerShellLogFile()
registeredService()
recentActivity()
prefetch()
NTFS()
else :
print("\nPlease input correct options!")
pass
if __name__ == "__main__":
main()
|
KIMJOONSIG/Reboot3
|
Windows/reboot3.py
|
reboot3.py
|
py
| 36,418 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyfiglet.Figlet",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_LOCAL_MACHINE",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "winreg.HKEY_CURRENT_USER",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "winreg.ConnectRegistry",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "winreg.EnumKey",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "winreg.OpenKey",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "subprocess.getoutput",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "subprocess.getoutput",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "subprocess.getoutput",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "subprocess.getoutput",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "subprocess.getoutput",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "subprocess.getoutput",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "winreg.OpenKey",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_LOCAL_MACHINE",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_READ",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "winreg.EnumKey",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "winreg.OpenKey",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_LOCAL_MACHINE",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_READ",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "winreg.EnumValue",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "os.environ.items",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "winreg.OpenKey",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_LOCAL_MACHINE",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_READ",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_WOW64_64KEY",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "winreg.EnumKey",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 330,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 331,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "socket.gethostbyname",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 401,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 402,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 403,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 424,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 449,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 470,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 471,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 472,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 474,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 488,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 491,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 499,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 500,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 504,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 514,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 522,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 539,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 539,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 541,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "winreg.OpenKey",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_CURRENT_USER",
"line_number": 546,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_READ",
"line_number": 546,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_WOW64_64KEY",
"line_number": 546,
"usage_type": "attribute"
},
{
"api_name": "winreg.OpenKey",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_CURRENT_USER",
"line_number": 548,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_READ",
"line_number": 548,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_WOW64_64KEY",
"line_number": 548,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 564,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 564,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 569,
"usage_type": "attribute"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 572,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 574,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "winreg.EnumValue",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 581,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 584,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 585,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 585,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 602,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 603,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 604,
"usage_type": "call"
},
{
"api_name": "winreg.OpenKey",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_CURRENT_USER",
"line_number": 607,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_READ",
"line_number": 607,
"usage_type": "attribute"
},
{
"api_name": "winreg.KEY_WOW64_64KEY",
"line_number": 607,
"usage_type": "attribute"
},
{
"api_name": "winreg.EnumValue",
"line_number": 612,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 619,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 633,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 634,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "winreg.ConnectRegistry",
"line_number": 638,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_USERS",
"line_number": 638,
"usage_type": "attribute"
},
{
"api_name": "winreg.OpenKey",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 640,
"usage_type": "call"
},
{
"api_name": "winreg.EnumKey",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "winreg.ConnectRegistry",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "winreg.HKEY_USERS",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "winreg.CreateKey",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 649,
"usage_type": "attribute"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "winreg.EnumKey",
"line_number": 653,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 654,
"usage_type": "attribute"
},
{
"api_name": "winreg.OpenKey",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "winreg.OpenKey",
"line_number": 671,
"usage_type": "call"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "winreg.EnumKey",
"line_number": 673,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 674,
"usage_type": "attribute"
},
{
"api_name": "winreg.QueryInfoKey",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "winreg.EnumValue",
"line_number": 680,
"usage_type": "call"
},
{
"api_name": "winreg.REG_SZ",
"line_number": 681,
"usage_type": "attribute"
},
{
"api_name": "winreg.REG_DWORD",
"line_number": 683,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 693,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 694,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 694,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 697,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 698,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 701,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 703,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 703,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfile",
"line_number": 706,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 709,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 717,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 717,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 727,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 735,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 735,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 735,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 736,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 736,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 737,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 741,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 741,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 752,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 752,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 752,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 753,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 753,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 754,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 755,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 758,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 761,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 763,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 763,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 764,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 764,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy2",
"line_number": 767,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 775,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 776,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 777,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 777,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 778,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 781,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 781,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 786,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 786,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 787,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 787,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy2",
"line_number": 789,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 797,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 797,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 797,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 798,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 798,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 799,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 799,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 800,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 801,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 801,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 801,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 801,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 806,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 806,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 807,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 819,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 820,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 820,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 821,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 821,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 822,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 824,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 824,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 824,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 829,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 830,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 830,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 831,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 834,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 834,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 838,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 838,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 838,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 838,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 849,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 850,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 850,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 851,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 851,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 852,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 858,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 864,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 864,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 865,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 865,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 866,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 872,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 872,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 872,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 873,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 873,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 874,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 882,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 882,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 887,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 887,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 889,
"usage_type": "attribute"
}
] |
23404494402
|
"""Test the concurrency module."""
from typing import Any
import pytest
from rtasr.concurrency import ConcurrencyHandler, ConcurrencyToken
class TestConcurrencyToken:
"""Test the ConcurrencyToken class."""
@pytest.mark.parametrize("value", [None, "string", True, False, [], {}])
def test_concurrency_token_invalid(self, value: Any) -> None:
"""Test the concurrency token with invalid values."""
with pytest.raises(TypeError):
ConcurrencyToken(value)
@pytest.mark.parametrize("value", [0, 1, 2, 1.0, 2.0])
    def test_concurrency_token_valid(self, value: float) -> None:
"""Test the concurrency token."""
token = ConcurrencyToken(value=value)
assert token.value == value
class TestConcurrencyHandler:
"""Test the ConcurrencyHandler class."""
@pytest.mark.asyncio
async def test_concurrency_handler_with_limit(self) -> None:
"""Test the concurrency handler with a limit."""
limit = 5
concurrency_handler = ConcurrencyHandler(limit)
assert hasattr(concurrency_handler, "limit")
assert hasattr(concurrency_handler, "queue")
assert concurrency_handler.limit == limit
assert concurrency_handler.queue.maxsize == limit
tokens = []
for _ in range(limit):
token = await concurrency_handler.get()
assert isinstance(token, ConcurrencyToken)
tokens.append(token)
# Queue should be empty now
assert concurrency_handler.queue.empty()
for token in tokens:
concurrency_handler.put(token)
# Queue should be refilled
for _ in range(limit):
token = await concurrency_handler.get()
assert isinstance(token, ConcurrencyToken)
@pytest.mark.asyncio
async def test_concurrency_handler_without_limit(self) -> None:
"""Test the concurrency handler without a limit."""
concurrency_handler = ConcurrencyHandler(limit=None)
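        # With no limit configured, get() should return immediately with None.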
token = await concurrency_handler.get()
assert token is None
|
Wordcab/rtasr
|
tests/test_concurrency.py
|
test_concurrency.py
|
py
| 2,077 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "typing.Any",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rtasr.concurrency.ConcurrencyToken",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "rtasr.concurrency.ConcurrencyToken",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "rtasr.concurrency.ConcurrencyHandler",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "rtasr.concurrency.ConcurrencyToken",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "rtasr.concurrency.ConcurrencyToken",
"line_number": 56,
"usage_type": "argument"
},
{
"api_name": "pytest.mark",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "rtasr.concurrency.ConcurrencyHandler",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 58,
"usage_type": "attribute"
}
] |
6173790975
|
import csv
import re
import sqlite3
import numpy as np
import pandas as pd
from nltk.stem.porter import *
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from PIL import Image
from os import path
import matplotlib.pyplot as plt
import matplotlib as mpl
import pickle
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud, STOPWORDS
from collections import Counter
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
stemmer = SnowballStemmer('english')
wordnet_lemmatizer = WordNetLemmatizer()
#############################In/out############################
def open_file(csvfile):
reader = pd.read_csv(csvfile)
return reader
def output_file(df,string):
df.to_csv(string, index = False)
#############################Word Cloud & Feature Extraction############################
def text_process(data):
'''
Takes in a df in format of [text,stars] performs the following:
1. Lower capital letters
2. Remove all punctuation
3. Remove all stopwords
4. Reduce words to their word stem
5. Return a list of words
'''
for i in range(len(data)):
line = data[i]
line = line.lower() # lower case
        translation = str.maketrans("", "", string.punctuation)
line = line.translate(translation)
split = word_tokenize(line)
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
filtered = []
for token in split:
if re.search('[a-zA-Z]', token):
filtered.append(token)
        stop = set(stopwords.words('english'))
        words = [w for w in filtered if w not in stop]
        d = [stemmer.stem(w) for w in words]
d = [wordnet_lemmatizer.lemmatize(word) for word in d]
data[i] = d
return data
def top_words(business_id,review_ml ):
    train = review_ml[(review_ml['business_id'] == business_id) & (review_ml['True(1)/Deceptive(0)'] == 'True')]
text = list(train['Review']) # text
text = text_process(text)
text = sum(text, [])
counts = Counter(text)
wordcloud = WordCloud(
background_color='white',
max_words=100,
max_font_size=50,
min_font_size=10,
random_state=40,
).fit_words(counts)
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off') # remove axis
plt.show()
def change_label(x):
for i in range(len(x)):
if x[i] >= 3.0: # good review: stars >=3.0
x[i] = 1
        else: # bad review: stars < 3.0
x[i] = 0
return x
def bigram(business_id, review_ml):
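    # Trains a linear SVM on TF-IDF bigrams of the true reviews and plots the
    # ten most negative and ten most positive coefficients as indicative phrases.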
# only use true review
train0 = review_ml[review_ml['business_id'] == business_id]
train = train0[train0['True(1)/Deceptive(0)'] == 'True']
#print(train.head())
#train_data = list(train['Review']) # text
label = list(train['Stars']) # ratings
#print(label)
train_label = change_label(label)
#print(train_label)
# TfidfVectorizer Transform
transformer = TfidfVectorizer(stop_words='english',
ngram_range=(2, 2)) # "ignore terms that appear in less than 1% of the documents".
#print(transformer)
cvectorizer = transformer.fit(train['Review'])
#print(cvectorizer)
transformed = cvectorizer.transform(train['Review'])
#print(transformed)
# SVM regression
clf = LinearSVC()
clf.fit(transformed, train_label)
coefficients = clf.coef_.ravel()
#print(coefficients)
pos_coefficients = np.argsort(coefficients)[-10:]
neg_coefficients = np.argsort(coefficients)[:10]
combine = np.hstack([neg_coefficients, pos_coefficients])
#print("combine:, ",combine)
#print("coefficients[combine]: ", coefficients[combine])
plt.figure(figsize=(7, 4))
#print("fisnish 1")
colors = ['red' if i < 0 else 'blue' for i in coefficients[combine]]
#print("finish 2")
plt.bar(np.arange(len(coefficients[combine])), coefficients[combine], color=colors)
#print("finish 3")
feature_names = np.array(cvectorizer.get_feature_names())
#print("finish 4")
plt.title('why the restaurant is rated as bad or good ', fontsize=15)
#print("finish 5")
plt.xticks(np.arange(0, 2 * 10), feature_names[combine], rotation=40, ha='right')
#print("finish 6")
plt.show()
#print("finish 7")
#############################helper function#############################
def load_database_data(c, zipcode, business_name_input):
c.execute('''
SELECT b_id,r.review, r.r_stars
FROM business, review_fact_table r
WHERE postal_code = ? AND name = ? AND r.business_id = b_id''', (zipcode, business_name_input,))
dataframe = pd.DataFrame(data=c.fetchall(), columns=['business_id', 'review', 'rating'])
return dataframe
def select_data(c, zipcode, business_name):
c.execute('''
SELECT DISTINCT(b_id)
FROM business, review_fact_table r
WHERE postal_code = ? AND name = ? AND r.business_id = b_id''', (zipcode, business_name,))
single_df = pd.DataFrame(data=c.fetchall(), columns=['business_id'])
return single_df['business_id'][0]
def fake_ratio(predict, single):
# Load fake results
predicted_fake = predict
# reviews that has only that business id
reviews = predicted_fake[predicted_fake['business_id'] == single]
n = reviews.shape[0]
# print(n)
fake = reviews.groupby('True(1)/Deceptive(0)').count()['Review'][0]
# print(fake)
fake_percentage = fake / n
# print(fake_percentage)
return fake_percentage
##############################main######################################
def main():
#open states and income raw data
zipcode = input("zipcode:")
business_name = input("restaurant name:")
print(zipcode,business_name)
conn = sqlite3.connect('yelp.db')
c = conn.cursor()
predicted_fake = open_file('data/predicted_review.csv')
# find the business id
single = select_data(c, zipcode, business_name)
# print(single)
fake_review_ratio = fake_ratio(predicted_fake,single)
print(fake_review_ratio)
#top_words(single, predicted_fake)
bigram(single, predicted_fake)
#if __name__=="__main__":
#main()
main()
|
zzhang83/Yelp_Sentiment_Analysis
|
Scripts/UI.py
|
UI.py
|
py
| 6,335 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "nltk.stem.SnowballStemmer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.LinearSVC",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 185,
"usage_type": "call"
}
] |
8190287555
|
#!/usr/bin/env python3
# Get sequences from NCBI.
# To be called from Snakefile.
# Usage: python windows.py <infile> <outfile> <email> <window_size>
import os
import sys
from Bio import Entrez
from Bio import SeqIO
import numpy as np
import pandas as pd
def main():
snpfile = sys.argv[1]
outfile = sys.argv[2]
email = sys.argv[3]
    window_size = int(sys.argv[4])  # argv values are strings; cast before the window arithmetic below
interest = pd.read_csv(
snpfile,
header=0,
dtype={"chrom": str, "center": np.int64},
)
interest.columns = interest.columns.str.lower().str.replace(" ", "_")
interest[["chrom", "center"]] = (
interest.iloc[:, 0]
.str.replace("_[A-Z]{3}?", "")
.str.replace(" ", "")
.str.split("_", expand=True)
)
    interest = interest.assign(
        start=lambda df: pd.to_numeric(df["center"]) - window_size,
        end=lambda df: pd.to_numeric(df["center"]) + window_size,
    )
interest.index.rename("Index", inplace=True)
summary = pd.read_csv("Data/dmelSummary.csv")
summary.index.rename("Index", inplace=True)
summary.columns = summary.columns.str.lower().str.replace(" ", "_")
seqs = []
Entrez.email = email
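    # Fetch a +/- window_size window around each site from the chromosome's
    # RefSeq record (looked up by name in the summary table).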
for index, row in interest.iterrows():
with Entrez.efetch(
db="nucleotide",
id=summary.loc[summary["name"] == row["chrom"], "refseq"].iat[0],
rettype="fasta",
strand=1,
seq_start=row["start"],
seq_stop=row["end"],
) as handle:
seqs.append(SeqIO.read(handle, "fasta"))
SeqIO.write(seqs, outfile, "fasta")
if __name__ == "__main__":
main()
|
mpjuers/SexualSelectionSubstitutions
|
Scripts/GetData/windows.py
|
windows.py
|
py
| 1,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "Bio.Entrez.email",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "Bio.Entrez",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "Bio.Entrez.efetch",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "Bio.Entrez",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.read",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.write",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 54,
"usage_type": "name"
}
] |
29284148447
|
# -*- coding: utf-8 -*-
import sys
import re
import pdb
import sqlite3
def to_numbers(ch):
n_dict = {"一":1,\
"二":2,\
"三":3,\
"四":4,\
"五":5,\
"六":6,\
"七":7,\
"八":8,\
"九":9,\
"十":10}
return n_dict[ch]
def main(args):
#pdb.set_trace()
#rid = "testrid"
rid = args[1].decode("gb18030");
#lines = "||| 小校场 三清殿 小校场 ||| \ | /||| 小校场---武当广场---小校场 ||| / ↑ \||| 小校场 凌霄宫 小校场 ||||||武当广场 - [门派]|||||| 这是一个由大石板铺成的广场,是武当弟子学习武功和互|||相切磋的地点。周围种满了梧桐树,一到秋天就是满地的落叶。|||一个年纪轻轻的道童正在打扫。北边是灵霄宫三清殿。||| 「深秋」: 东方的天空渐渐的发白了,又一个万物初醒的早上!。|||||| 这里明显的出口是 north、east、southeast、northeast、west、southdown、southwest|||、northwest 和 eastdown。|||||| 二位武当派小道士 道童(Daotong)||| 武当派真人 冲虚道长(Chongxu daozhang)|||>".split("|||")
lines = args[2].decode("gb18030").encode("utf8").split("|||")
p = re.compile(r"^\s*((一|二|三|四|五|六|七|八|九|十)(位|只))?(.*?)\((.*?)\)( <(.*?)>)?$")
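    # Matches NPC lines like "二位武当派小道士 道童(Daotong)": an optional
    # count prefix (Chinese numeral + 位/只), titles and name, the English id
    # in parentheses, and an optional trailing "<status>" marker.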
conn = sqlite3.connect('./tintin/myscripts/pkuxkx/gps/xkxmap.sqlite')
cursor = conn.cursor()
cursor.execute('delete from room_npc where rid=?',(rid,))
for line in lines:
if p.match(line):
s = p.match(line)
npc_num = 1
if s.group(2) is not None:
npc_num = to_numbers(s.group(2))
ps = re.compile(r" |「|」")
mylist = ps.split(s.group(4).strip(" "))
npc_title1=""
npc_title2=""
npc_title3=""
npc_cname=""
ts = []
for t in mylist:
if t=="":
continue
ts.append(t)
if len(ts) == 4:
npc_title1 = ts[0]
npc_title2 = ts[1]
npc_title3 = ts[2]
npc_cname = ts[3]
if len(ts) == 3:
npc_title1 = ts[0]
npc_title2 = ts[1]
npc_cname = ts[2]
if len(ts) == 2:
npc_title1 = ts[0]
npc_cname = ts[1]
if len(ts) == 1:
npc_cname = ts[0]
npc_ename = s.group(5)
npc_status = s.group(7)
if npc_status is None:
npc_status = "normal"
# print(npc_num)
# print(npc_title1.decode("utf8").encode("gb18030"))
# print(npc_title2.decode("utf8").encode("gb18030"))
# print(npc_title3.decode("utf8").encode("gb18030"))
# print(npc_cname.decode("utf8").encode("gb18030"))
# print(npc_ename.decode("utf8").encode("gb18030"))
# print(npc_status.decode("utf8").encode("gb18030"))
cursor.execute('insert into room_npc (rid,npc_num,npc_title1,npc_title2,npc_title3,npc_cname,npc_ename) values (?,?,?,?,?,?,?)',(rid,npc_num,npc_title1.decode("utf8"),npc_title2.decode("utf8"),npc_title3.decode("utf8"),npc_cname.decode("utf8"),npc_ename.decode("utf8")));
conn.commit()
conn.close()
if __name__=="__main__":
main(sys.argv)
|
Tubao/xkx
|
pkuxkx/gps/store_npcs.py
|
store_npcs.py
|
py
| 3,716 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 80,
"usage_type": "attribute"
}
] |
17233943864
|
# coding: utf-8
"""
Refinery Calc API Documentation
Integrate the powerful Refinery Calc Engine into your process using this API. # noqa: E501
OpenAPI spec version: 1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OutputTypes(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'balance': 'list[OutputDetails]',
'complexity': 'list[OutputDetails]',
'constraints': 'list[OutputDetails]',
'crude_names': 'list[OutputDetails]',
'crude_percent': 'list[OutputDetails]',
'crude_pricing': 'list[OutputDetails]',
'crude_quality': 'list[OutputDetails]',
'crudes_and_feeds': 'list[OutputDetails]',
'delete_category': 'list[OutputDetails]',
'economics': 'list[OutputDetails]',
'end_points_f': 'list[OutputDetails]',
'expense_factors': 'list[OutputDetails]',
'fcc_hydrk_conv': 'list[OutputDetails]',
'feeds_pricing': 'list[OutputDetails]',
'gasoline_blending': 'list[OutputDetails]',
'general': 'list[OutputDetails]',
'hydrogen': 'list[OutputDetails]',
'product_pricing': 'list[OutputDetails]',
'products': 'list[OutputDetails]',
'refinery_layout_svg': 'list[OutputDetails]',
'sustainability': 'list[OutputDetails]',
'unit_balance': 'list[OutputDetails]'
}
attribute_map = {
'balance': 'balance',
'complexity': 'complexity',
'constraints': 'constraints',
'crude_names': 'crudeNames',
'crude_percent': 'crudePercent',
'crude_pricing': 'crudePricing',
'crude_quality': 'crudeQuality',
'crudes_and_feeds': 'crudesAndFeeds',
'delete_category': 'deleteCategory',
'economics': 'economics',
'end_points_f': 'endPointsF',
'expense_factors': 'expenseFactors',
'fcc_hydrk_conv': 'fccHydrkConv',
'feeds_pricing': 'feedsPricing',
'gasoline_blending': 'gasolineBlending',
'general': 'general',
'hydrogen': 'hydrogen',
'product_pricing': 'productPricing',
'products': 'products',
'refinery_layout_svg': 'refineryLayoutSVG',
'sustainability': 'sustainability',
'unit_balance': 'unitBalance'
}
def __init__(self, balance=None, complexity=None, constraints=None, crude_names=None, crude_percent=None, crude_pricing=None, crude_quality=None, crudes_and_feeds=None, delete_category=None, economics=None, end_points_f=None, expense_factors=None, fcc_hydrk_conv=None, feeds_pricing=None, gasoline_blending=None, general=None, hydrogen=None, product_pricing=None, products=None, refinery_layout_svg=None, sustainability=None, unit_balance=None): # noqa: E501
"""OutputTypes - a model defined in Swagger""" # noqa: E501
self._balance = None
self._complexity = None
self._constraints = None
self._crude_names = None
self._crude_percent = None
self._crude_pricing = None
self._crude_quality = None
self._crudes_and_feeds = None
self._delete_category = None
self._economics = None
self._end_points_f = None
self._expense_factors = None
self._fcc_hydrk_conv = None
self._feeds_pricing = None
self._gasoline_blending = None
self._general = None
self._hydrogen = None
self._product_pricing = None
self._products = None
self._refinery_layout_svg = None
self._sustainability = None
self._unit_balance = None
self.discriminator = None
if balance is not None:
self.balance = balance
if complexity is not None:
self.complexity = complexity
if constraints is not None:
self.constraints = constraints
if crude_names is not None:
self.crude_names = crude_names
if crude_percent is not None:
self.crude_percent = crude_percent
if crude_pricing is not None:
self.crude_pricing = crude_pricing
if crude_quality is not None:
self.crude_quality = crude_quality
if crudes_and_feeds is not None:
self.crudes_and_feeds = crudes_and_feeds
if delete_category is not None:
self.delete_category = delete_category
if economics is not None:
self.economics = economics
if end_points_f is not None:
self.end_points_f = end_points_f
if expense_factors is not None:
self.expense_factors = expense_factors
if fcc_hydrk_conv is not None:
self.fcc_hydrk_conv = fcc_hydrk_conv
if feeds_pricing is not None:
self.feeds_pricing = feeds_pricing
if gasoline_blending is not None:
self.gasoline_blending = gasoline_blending
if general is not None:
self.general = general
if hydrogen is not None:
self.hydrogen = hydrogen
if product_pricing is not None:
self.product_pricing = product_pricing
if products is not None:
self.products = products
if refinery_layout_svg is not None:
self.refinery_layout_svg = refinery_layout_svg
if sustainability is not None:
self.sustainability = sustainability
if unit_balance is not None:
self.unit_balance = unit_balance
@property
def balance(self):
"""Gets the balance of this OutputTypes. # noqa: E501
:return: The balance of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this OutputTypes.
:param balance: The balance of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._balance = balance
@property
def complexity(self):
"""Gets the complexity of this OutputTypes. # noqa: E501
:return: The complexity of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._complexity
@complexity.setter
def complexity(self, complexity):
"""Sets the complexity of this OutputTypes.
:param complexity: The complexity of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._complexity = complexity
@property
def constraints(self):
"""Gets the constraints of this OutputTypes. # noqa: E501
:return: The constraints of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._constraints
@constraints.setter
def constraints(self, constraints):
"""Sets the constraints of this OutputTypes.
:param constraints: The constraints of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._constraints = constraints
@property
def crude_names(self):
"""Gets the crude_names of this OutputTypes. # noqa: E501
:return: The crude_names of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_names
@crude_names.setter
def crude_names(self, crude_names):
"""Sets the crude_names of this OutputTypes.
:param crude_names: The crude_names of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_names = crude_names
@property
def crude_percent(self):
"""Gets the crude_percent of this OutputTypes. # noqa: E501
:return: The crude_percent of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_percent
@crude_percent.setter
def crude_percent(self, crude_percent):
"""Sets the crude_percent of this OutputTypes.
:param crude_percent: The crude_percent of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_percent = crude_percent
@property
def crude_pricing(self):
"""Gets the crude_pricing of this OutputTypes. # noqa: E501
:return: The crude_pricing of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_pricing
@crude_pricing.setter
def crude_pricing(self, crude_pricing):
"""Sets the crude_pricing of this OutputTypes.
:param crude_pricing: The crude_pricing of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_pricing = crude_pricing
@property
def crude_quality(self):
"""Gets the crude_quality of this OutputTypes. # noqa: E501
:return: The crude_quality of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_quality
@crude_quality.setter
def crude_quality(self, crude_quality):
"""Sets the crude_quality of this OutputTypes.
:param crude_quality: The crude_quality of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_quality = crude_quality
@property
def crudes_and_feeds(self):
"""Gets the crudes_and_feeds of this OutputTypes. # noqa: E501
:return: The crudes_and_feeds of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crudes_and_feeds
@crudes_and_feeds.setter
def crudes_and_feeds(self, crudes_and_feeds):
"""Sets the crudes_and_feeds of this OutputTypes.
:param crudes_and_feeds: The crudes_and_feeds of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crudes_and_feeds = crudes_and_feeds
@property
def delete_category(self):
"""Gets the delete_category of this OutputTypes. # noqa: E501
:return: The delete_category of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._delete_category
@delete_category.setter
def delete_category(self, delete_category):
"""Sets the delete_category of this OutputTypes.
:param delete_category: The delete_category of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._delete_category = delete_category
@property
def economics(self):
"""Gets the economics of this OutputTypes. # noqa: E501
:return: The economics of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._economics
@economics.setter
def economics(self, economics):
"""Sets the economics of this OutputTypes.
:param economics: The economics of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._economics = economics
@property
def end_points_f(self):
"""Gets the end_points_f of this OutputTypes. # noqa: E501
:return: The end_points_f of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._end_points_f
@end_points_f.setter
def end_points_f(self, end_points_f):
"""Sets the end_points_f of this OutputTypes.
:param end_points_f: The end_points_f of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._end_points_f = end_points_f
@property
def expense_factors(self):
"""Gets the expense_factors of this OutputTypes. # noqa: E501
:return: The expense_factors of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._expense_factors
@expense_factors.setter
def expense_factors(self, expense_factors):
"""Sets the expense_factors of this OutputTypes.
:param expense_factors: The expense_factors of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._expense_factors = expense_factors
@property
def fcc_hydrk_conv(self):
"""Gets the fcc_hydrk_conv of this OutputTypes. # noqa: E501
:return: The fcc_hydrk_conv of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._fcc_hydrk_conv
@fcc_hydrk_conv.setter
def fcc_hydrk_conv(self, fcc_hydrk_conv):
"""Sets the fcc_hydrk_conv of this OutputTypes.
:param fcc_hydrk_conv: The fcc_hydrk_conv of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._fcc_hydrk_conv = fcc_hydrk_conv
@property
def feeds_pricing(self):
"""Gets the feeds_pricing of this OutputTypes. # noqa: E501
:return: The feeds_pricing of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._feeds_pricing
@feeds_pricing.setter
def feeds_pricing(self, feeds_pricing):
"""Sets the feeds_pricing of this OutputTypes.
:param feeds_pricing: The feeds_pricing of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._feeds_pricing = feeds_pricing
@property
def gasoline_blending(self):
"""Gets the gasoline_blending of this OutputTypes. # noqa: E501
:return: The gasoline_blending of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._gasoline_blending
@gasoline_blending.setter
def gasoline_blending(self, gasoline_blending):
"""Sets the gasoline_blending of this OutputTypes.
:param gasoline_blending: The gasoline_blending of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._gasoline_blending = gasoline_blending
@property
def general(self):
"""Gets the general of this OutputTypes. # noqa: E501
:return: The general of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._general
@general.setter
def general(self, general):
"""Sets the general of this OutputTypes.
:param general: The general of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._general = general
@property
def hydrogen(self):
"""Gets the hydrogen of this OutputTypes. # noqa: E501
:return: The hydrogen of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._hydrogen
@hydrogen.setter
def hydrogen(self, hydrogen):
"""Sets the hydrogen of this OutputTypes.
:param hydrogen: The hydrogen of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._hydrogen = hydrogen
@property
def product_pricing(self):
"""Gets the product_pricing of this OutputTypes. # noqa: E501
:return: The product_pricing of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._product_pricing
@product_pricing.setter
def product_pricing(self, product_pricing):
"""Sets the product_pricing of this OutputTypes.
:param product_pricing: The product_pricing of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._product_pricing = product_pricing
@property
def products(self):
"""Gets the products of this OutputTypes. # noqa: E501
:return: The products of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._products
@products.setter
def products(self, products):
"""Sets the products of this OutputTypes.
:param products: The products of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._products = products
@property
def refinery_layout_svg(self):
"""Gets the refinery_layout_svg of this OutputTypes. # noqa: E501
:return: The refinery_layout_svg of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._refinery_layout_svg
@refinery_layout_svg.setter
def refinery_layout_svg(self, refinery_layout_svg):
"""Sets the refinery_layout_svg of this OutputTypes.
:param refinery_layout_svg: The refinery_layout_svg of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._refinery_layout_svg = refinery_layout_svg
@property
def sustainability(self):
"""Gets the sustainability of this OutputTypes. # noqa: E501
:return: The sustainability of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._sustainability
@sustainability.setter
def sustainability(self, sustainability):
"""Sets the sustainability of this OutputTypes.
:param sustainability: The sustainability of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._sustainability = sustainability
@property
def unit_balance(self):
"""Gets the unit_balance of this OutputTypes. # noqa: E501
:return: The unit_balance of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._unit_balance
@unit_balance.setter
def unit_balance(self, unit_balance):
"""Sets the unit_balance of this OutputTypes.
:param unit_balance: The unit_balance of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._unit_balance = unit_balance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OutputTypes, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OutputTypes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
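# Minimal usage sketch (assumption: the generated __init__ takes each attribute
# as a keyword argument defaulting to None, as the setters above suggest):
#   ot = OutputTypes(balance=[], economics=[])
#   ot.to_dict()          # serializes every field listed in swagger_types
#   ot == OutputTypes()   # __eq__ compares the full __dict__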
|
refinerycalc/sdk-example-python
|
python/refinerycalc/models/output_types.py
|
output_types.py
|
py
| 19,790 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "six.iteritems",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 641,
"usage_type": "call"
}
] |
32552302191
|
import threading
import wikipedia
from kivy.clock import mainthread
from kivymd.app import MDApp
class MainApp(MDApp):
url = ""
def build(self):
self.title = "Wikipedia-App"
@mainthread
def search(self, text):
t1 = threading.Thread(target=self.get_wiki, args=(text,), daemon=True)
t1.start()
def get_wiki(self, text):
self.root.ids.rc_spin.active = True
self.root.ids.summary.text = ""
self.root.ids.title.text = ""
wikipedia.set_lang("en")
try:
summary = wikipedia.page(text.strip())
self.root.ids.title.text = summary.title
self.root.ids.summary.text = f"\n{summary.summary}"
except Exception as e:
print(e)
self.root.ids.title.text = (
"[color=#EE4B2B]"
+ "Sorry unable to find "
+ self.root.ids.fld.text
+ "[/color]"
)
self.root.ids.rc_spin.active = False
if __name__ == "__main__":
MainApp().run()
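# The widget ids used above (fld, rc_spin, title, summary) are assumed to come
# from an accompanying main.kv file, roughly along these lines (a sketch, not
# the project's actual layout):
#   MDBoxLayout:
#       orientation: "vertical"
#       MDTextField:
#           id: fld
#           on_text_validate: app.search(self.text)
#       MDSpinner:
#           id: rc_spin
#           active: False
#       MDLabel:
#           id: title
#       MDLabel:
#           id: summary
# Note that get_wiki mutates these widgets from a worker thread; Kivy normally
# expects UI updates to be scheduled on the main thread.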
|
Kulothungan16/Example-Kivy-Apps
|
WikiPedia/main.py
|
main.py
|
py
| 1,052 |
python
|
en
|
code
| 42 |
github-code
|
6
|
[
{
"api_name": "kivymd.app.MDApp",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "kivy.clock.mainthread",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "wikipedia.set_lang",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wikipedia.page",
"line_number": 26,
"usage_type": "call"
}
] |
457427877
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import logging
import math
from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd.variable import Variable
from fastreid.modeling.ops import MetaConv2d, MetaLinear, MetaBNNorm, MetaINNorm, MetaIBNNorm, MetaGate
from fastreid.layers import (
IBN,
SELayer,
Non_local,
get_norm,
)
from fastreid.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
from .build import BACKBONE_REGISTRY
from fastreid.utils import comm
K = 4
logger = logging.getLogger(__name__)
model_urls = {
'18x': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'34x': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'50x': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'101x': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'ibn_18x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet18_ibn_a-2f571257.pth',
'ibn_34x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet34_ibn_a-94bc1577.pth',
'ibn_50x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_a-d9d0bb7b.pth',
'ibn_101x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_a-59ea0ac6.pth',
'se_ibn_101x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/se_resnet101_ibn_a-fabed4e2.pth',
}
def repackage_hidden(h):
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
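# repackage_hidden rewraps RNN hidden states in fresh Variables, detaching them
# from the previous computation graph so gradients do not flow across sequence
# chunks (legacy torch.autograd.Variable API).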
class Sequential_ext(nn.Module):
"""A Sequential container extended to also propagate the gating information
that is needed in the target rate loss.
"""
def __init__(self, *args):
super(Sequential_ext, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def __getitem__(self, idx):
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
it = iter(self._modules.values())
for i in range(idx):
next(it)
return next(it)
def __len__(self):
return len(self._modules)
def forward(self, input, opt=None):
for i, module in enumerate(self._modules.values()):
input = module(input, opt)
return input
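# Usage sketch (assumption: every submodule's forward accepts an extra `opt`
# argument, as the Meta* layers below do):
#   ds = Sequential_ext(MetaConv2d(64, 256, kernel_size=1, bias=False),
#                       MetaBNNorm(256))
#   y = ds(x, opt)  # opt is threaded through each submodule call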
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, bn_norm, with_ibn=False, with_se=False,
stride=1, downsample=None, reduction=16):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
if with_ibn:
self.bn1 = IBN(planes, bn_norm)
else:
self.bn1 = get_norm(bn_norm, planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = get_norm(bn_norm, planes)
self.relu = nn.ReLU(inplace=True)
if with_se:
self.se = SELayer(planes, reduction)
else:
self.se = nn.Identity()
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class MetaSELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(MetaSELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = MetaLinear(channel, int(channel / reduction), bias=False)
self.relu = nn.ReLU()
self.fc2 = MetaLinear(int(channel / reduction), channel, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x, opt=None):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.relu(self.fc1(y, opt))
y = self.sigmoid(self.fc2(y, opt)).view(b, c, 1, 1)
return x * y.expand_as(x)
class Bottleneck2(nn.Module):
expansion = 4*K
def __init__(self, inplanes, planes, bn_norm, with_ibn=False, with_se=False,
stride=1, downsample=None, reduction=16):
super(Bottleneck2, self).__init__()
self.conv1 = MetaConv2d(inplanes * K, planes, kernel_size=1, bias=False, groups=K)
if with_ibn:
self.bn1 = MetaIBNNorm(planes)
else:
self.bn1 = MetaBNNorm(planes)
self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False, groups=K)
self.bn2 = MetaBNNorm(planes)
self.conv3 = MetaConv2d(planes, planes * self.expansion, kernel_size=1, bias=False, groups=K)
self.bn3 = MetaBNNorm(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
if with_se:
self.se = SELayer(planes * self.expansion, reduction)
else:
self.se = nn.Identity()
self.downsample = downsample
self.stride = stride
def forward(self, x, opt=None):
residual = x
out = self.conv1(x, opt)
out = self.bn1(out, opt)
out = self.relu(out)
out = self.conv2(out, opt)
out = self.bn2(out, opt)
out = self.relu(out)
out = self.conv3(out, opt)
out = self.bn3(out, opt)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x, opt)
out += residual
out = self.relu(out)
return out
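# Bottleneck2 runs K domain-specific experts in parallel via grouped convolutions
# (groups=K): the caller tiles the input K times along channels and reshapes the
# output back to (N, K, C, H, W) before weighting (see ResNet.forward).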
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, bn_norm, with_ibn=False, with_se=False,
stride=1, downsample=None, reduction=16):
super(Bottleneck, self).__init__()
if bn_norm == 'IN':
norm = MetaINNorm
else:
norm = MetaBNNorm
self.conv1 = MetaConv2d(inplanes, planes, kernel_size=1, bias=False)
if with_ibn:
self.bn1 = MetaIBNNorm(planes)
else:
self.bn1 = norm(planes)
self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = norm(planes)
self.conv3 = MetaConv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = norm(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
if with_se:
self.se = SELayer(planes * self.expansion, reduction)
else:
self.se = nn.Identity()
self.downsample = downsample
self.stride = stride
def forward(self, x, opt=None):
residual = x
out = self.conv1(x, opt)
out = self.bn1(out, opt)
out = self.relu(out)
out = self.conv2(out, opt)
out = self.bn2(out, opt)
out = self.relu(out)
out = self.conv3(out, opt)
out = self.bn3(out, opt)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x, opt)
out += residual
out = self.relu(out)
return out
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x, None
class HyperRouter(nn.Module):
def __init__(self, planes):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.planes = planes
self.fc1 = MetaLinear(planes, planes//16)
self.fc2 = MetaLinear(planes//16, planes*K)
self.fc_classifier = MetaLinear(planes*K, 3)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(-1)
def forward(self, x, opt=None):
x = self.avgpool(x).squeeze(-1).squeeze(-1)
weight = self.relu(F.normalize(self.fc1(x, opt), 2, -1))
weight = self.fc2(weight, opt).reshape(-1, self.planes, K)
domain_cls_logits = self.fc_classifier(weight.reshape(-1, self.planes*K), opt)
x = self.softmax(torch.einsum('bi,bil->bl', x, weight))
return x, domain_cls_logits
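# Shape sketch for HyperRouter.forward (assuming input (B, planes, H, W)):
#   pooled x: (B, planes); weight: (B, planes, K) after the fc2 reshape
#   routing weights: (B, K) from softmax over einsum('bi,bil->bl')
#   domain_cls_logits: (B, 3)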
class ResNet(nn.Module):
def __init__(self, last_stride, bn_norm, with_ibn, with_se, with_nl, block, layers, non_layers):
self.inplanes = 64
super().__init__()
self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = MetaBNNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0]-1, 1, bn_norm, with_ibn, with_se)
self.adaptor1_base = block(256, 64, 'IN', False, with_se)
self.adaptor1_sub = Bottleneck2(256, 64, bn_norm, with_ibn, with_se)
self.router1 = HyperRouter(256)
self.invariant_norm1 = MetaBNNorm(256)
self.specific_norm1 = MetaBNNorm(256)
self.meta_fuse1 = MetaGate(256)
self.meta_se1 = MetaSELayer(256)
self.map1 = MetaBNNorm(256, bias_freeze=True)
self.layer2 = self._make_layer(block, 128, layers[1]-1, 2, bn_norm, with_ibn, with_se)
self.adaptor2_base = block(512, 128, 'IN', False, with_se)
self.adaptor2_sub = Bottleneck2(512, 128, bn_norm, with_ibn, with_se)
self.router2 = HyperRouter(512)
self.invariant_norm2 = MetaBNNorm(512)
self.specific_norm2 = MetaBNNorm(512)
self.meta_fuse2 = MetaGate(512)
self.meta_se2 = MetaSELayer(512)
self.map2 = MetaBNNorm(512, bias_freeze=True)
self.layer3 = self._make_layer(block, 256, layers[2]-1, 2, bn_norm, with_ibn, with_se)
self.adaptor3_base = block(1024, 256, 'IN', False, with_se)
self.adaptor3_sub = Bottleneck2(1024, 256, bn_norm, with_ibn, with_se)
self.router3 = HyperRouter(1024)
self.invariant_norm3 = MetaBNNorm(1024)
self.specific_norm3 = MetaBNNorm(1024)
self.meta_fuse3 = MetaGate(1024)
self.meta_se3 = MetaSELayer(1024)
self.map3 = MetaBNNorm(1024, bias_freeze=True)
self.layer4 = self._make_layer(block, 512, layers[3]-1, last_stride, bn_norm, with_se=with_se)
self.adaptor4_base = block(2048, 512, 'IN', False, with_se)
self.adaptor4_sub = Bottleneck2(2048, 512, bn_norm, with_ibn, with_se)
self.router4 = HyperRouter(2048)
self.invariant_norm4 = MetaBNNorm(2048)
self.specific_norm4 = MetaBNNorm(2048)
self.meta_fuse4 = MetaGate(2048)
self.meta_se4 = MetaSELayer(2048)
self.map4 = MetaBNNorm(2048, bias_freeze=True)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# Standard Params
self.softmax = nn.Softmax(1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU()
self.random_init()
# fmt: off
if with_nl: self._build_nonlocal(layers, non_layers, bn_norm)
else: self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
# fmt: on
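        # Note: each stage is built with layers[i]-1 blocks; the final block of
        # the original ResNet stage is replaced by the adaptorX_base /
        # adaptorX_sub pair plus a HyperRouter that mixes the K expert outputs.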
def _make_layer(self, block, planes, blocks, stride=1, bn_norm="BN", with_ibn=False, with_se=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = Sequential_ext(
MetaConv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
MetaBNNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, bn_norm, with_ibn, with_se, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, bn_norm, with_ibn, with_se))
return nn.Sequential(*layers)
def _build_nonlocal(self, layers, non_layers, bn_norm):
self.NL_1 = nn.ModuleList(
[Non_local(256, bn_norm) for _ in range(non_layers[0])])
self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
self.NL_2 = nn.ModuleList(
[Non_local(512, bn_norm) for _ in range(non_layers[1])])
self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
self.NL_3 = nn.ModuleList(
[Non_local(1024, bn_norm) for _ in range(non_layers[2])])
self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
self.NL_4 = nn.ModuleList(
[Non_local(2048, bn_norm) for _ in range(non_layers[3])])
self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
def get_all_conv_layers(self, module):
for m in module:
if isinstance(m, Bottleneck):
for _m in m.modules():
if isinstance(_m, nn.Conv2d):
yield _m
def forward(self, x, epoch, opt=None):
x = self.conv1(x, opt)
x = self.bn1(x, opt)
x = self.relu(x)
x = self.maxpool(x)
weights = []
out_features = []
# layer 1
NL1_counter = 0
if len(self.NL_1_idx) == 0:
self.NL_1_idx = [-1]
for i in range(len(self.layer1)):
x = self.layer1[i](x, opt)
if i == self.NL_1_idx[NL1_counter]:
_, C, H, W = x.shape
x = self.NL_1[NL1_counter](x)
NL1_counter += 1
x_invariant = self.adaptor1_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor1_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router1(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm1(x_invariant, opt)
x_specific = self.specific_norm1(x_specific, opt)
x = self.meta_fuse1(x_invariant, x_specific, opt)
x = self.meta_se1(x, opt)
temp = self.map1(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
# layer 2
NL2_counter = 0
if len(self.NL_2_idx) == 0:
self.NL_2_idx = [-1]
for i in range(len(self.layer2)):
x = self.layer2[i](x, opt)
if i == self.NL_2_idx[NL2_counter]:
_, C, H, W = x.shape
x = self.NL_2[NL2_counter](x)
NL2_counter += 1
x_invariant = self.adaptor2_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor2_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router2(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm2(x_invariant, opt)
x_specific = self.specific_norm2(x_specific, opt)
x = self.meta_fuse2(x_invariant, x_specific, opt)
x = self.meta_se2(x, opt)
temp = self.map2(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
# layer 3
NL3_counter = 0
if len(self.NL_3_idx) == 0:
self.NL_3_idx = [-1]
for i in range(len(self.layer3)):
x = self.layer3[i](x, opt)
if i == self.NL_3_idx[NL3_counter]:
_, C, H, W = x.shape
x = self.NL_3[NL3_counter](x)
NL3_counter += 1
x_invariant = self.adaptor3_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor3_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router3(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm3(x_invariant, opt)
x_specific = self.specific_norm3(x_specific, opt)
x = self.meta_fuse3(x_invariant, x_specific, opt)
x = self.meta_se3(x, opt)
temp = self.map3(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
# layer 4
NL4_counter = 0
if len(self.NL_4_idx) == 0:
self.NL_4_idx = [-1]
for i in range(len(self.layer4)):
x = self.layer4[i](x, opt)
if i == self.NL_4_idx[NL4_counter]:
_, C, H, W = x.shape
x = self.NL_4[NL4_counter](x)
NL4_counter += 1
x_invariant = self.adaptor4_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor4_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router4(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm4(x_invariant, opt)
x_specific = self.specific_norm4(x_specific, opt)
x = self.meta_fuse4(x_invariant, x_specific, opt)
x = self.meta_se4(x, opt)
temp = self.map4(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
weights = torch.cat(weights, -1)
return x, weights, out_features
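    # Return-shape sketch (assumptions: batch B, K=4 routing experts):
    #   x:            final (B, 2048, h, w) feature map
    #   weights:      (B, 4*K), the concatenated router weights of all stages
    #   out_features: four L2-normalized pooled features with
    #                 256/512/1024/2048 channels respectively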
def random_init(self):
for name, m in self.named_modules():
if isinstance(m, MetaConv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
nn.init.normal_(m.weight, 0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_pretrained_weights(key):
"""Initializes model with pretrained weights.
    Layers that don't match the pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = model_urls[key].split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
logger.info(f"Pretrain model don't exist, downloading from {model_urls[key]}")
if comm.is_main_process():
gdown.download(model_urls[key], cached_file, quiet=False)
comm.synchronize()
logger.info(f"Loading pretrained model from {cached_file}")
state_dict = torch.load(cached_file, map_location=torch.device('cpu'))
return state_dict
@BACKBONE_REGISTRY.register()
def build_meta_dynamic_router_resnet_backbone(cfg):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# fmt: off
pretrain = cfg.MODEL.BACKBONE.PRETRAIN
pretrain_path = cfg.MODEL.BACKBONE.PRETRAIN_PATH
last_stride = cfg.MODEL.BACKBONE.LAST_STRIDE
bn_norm = cfg.MODEL.BACKBONE.NORM
with_ibn = cfg.MODEL.BACKBONE.WITH_IBN
with_se = cfg.MODEL.BACKBONE.WITH_SE
with_nl = cfg.MODEL.BACKBONE.WITH_NL
depth = cfg.MODEL.BACKBONE.DEPTH
# fmt: on
num_blocks_per_stage = {
'18x': [2, 2, 2, 2],
'34x': [3, 4, 6, 3],
'50x': [3, 4, 6, 3],
'101x': [3, 4, 23, 3],
}[depth]
nl_layers_per_stage = {
'18x': [0, 0, 0, 0],
'34x': [0, 0, 0, 0],
'50x': [0, 2, 3, 0],
'101x': [0, 2, 9, 0]
}[depth]
block = {
'18x': BasicBlock,
'34x': BasicBlock,
'50x': Bottleneck,
'101x': Bottleneck
}[depth]
model = ResNet(last_stride, bn_norm, with_ibn, with_se, with_nl, block,
num_blocks_per_stage, nl_layers_per_stage)
if pretrain:
        # Load pretrained weights from an explicit path when one is given
if pretrain_path:
try:
state_dict = torch.load(pretrain_path, map_location=torch.device('cpu'))
logger.info(f"Loading pretrained model from {pretrain_path}")
except FileNotFoundError as e:
logger.info(f'{pretrain_path} is not found! Please check this path.')
raise e
except KeyError as e:
logger.info("State dict keys error! Please check the state dict.")
raise e
else:
key = depth
if with_ibn: key = 'ibn_' + key
# if with_se: key = 'se_' + key
state_dict = init_pretrained_weights(key)
model_dict = model.state_dict()
for k in model_dict.keys():
if k in state_dict:
v = state_dict[k]
if model_dict[k].shape == v.shape:
model_dict[k] = v
else:
if len(v.shape) == 1:
model_dict[k] = v[:model_dict[k].shape[0]]
elif len(v.shape) == 2:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1]]
elif len(v.shape) == 3:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1], :model_dict[k].shape[2]]
elif len(v.shape) == 4:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1], :model_dict[k].shape[2], :model_dict[k].shape[3]]
elif len(v.shape) == 5:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1], :model_dict[k].shape[2], :model_dict[k].shape[3], :model_dict[k].shape[4]]
else:
raise Exception
else:
try:
if 'adaptor1_base' in k:
if model_dict[k].shape == state_dict['layer1.2'+k[13:]].shape:
model_dict[k] = state_dict['layer1.2'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor1_sub' in k:
if 'conv3' in k:
v = state_dict['layer1.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer1.2'+k[12:]]
model_dict[k] = v.repeat(K)
elif model_dict[k].shape == state_dict['layer1.2'+k[12:]].shape:
model_dict[k] = state_dict['layer1.2'+k[12:]]
else:
v = state_dict['layer1.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
elif 'adaptor2_base' in k:
if model_dict[k].shape == state_dict['layer2.3'+k[13:]].shape:
model_dict[k] = state_dict['layer2.3'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor2_sub' in k:
if 'conv3' in k:
v = state_dict['layer2.3'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer2.3'+k[12:]]
model_dict[k] = v.repeat(K)
elif model_dict[k].shape == state_dict['layer2.3'+k[12:]].shape:
model_dict[k] = state_dict['layer2.3'+k[12:]]
else:
v = state_dict['layer2.3'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
elif 'adaptor3_base' in k:
if model_dict[k].shape == state_dict['layer3.5'+k[13:]].shape:
model_dict[k] = state_dict['layer3.5'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor3_sub' in k:
if 'conv3' in k:
v = state_dict['layer3.5'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer3.5'+k[12:]]
model_dict[k] = v.repeat(K)
elif model_dict[k].shape == state_dict['layer3.5'+k[12:]].shape:
model_dict[k] = state_dict['layer3.5'+k[12:]]
else:
v = state_dict['layer3.5'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
elif 'adaptor4_base' in k:
if model_dict[k].shape == state_dict['layer4.2'+k[13:]].shape:
model_dict[k] = state_dict['layer4.2'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor4_sub' in k:
if 'conv3' in k:
v = state_dict['layer4.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer4.2'+k[12:]]
model_dict[k] = v.repeat(K)
elif 'bn1' in k:
if 'IN' in k:
model_dict[k] = state_dict['layer4.2.bn1.'+k.split('.')[-1]][:256]
else:
model_dict[k] = state_dict['layer4.2.bn1.'+k.split('.')[-1]][256:]
elif model_dict[k].shape == state_dict['layer4.2'+k[12:]].shape:
model_dict[k] = state_dict['layer4.2'+k[12:]]
else:
v = state_dict['layer4.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
except Exception:
pass
incompatible = model.load_state_dict(model_dict, strict=False)
if incompatible.missing_keys:
logger.info(
get_missing_parameters_message(incompatible.missing_keys)
)
if incompatible.unexpected_keys:
logger.info(
get_unexpected_parameters_message(incompatible.unexpected_keys)
)
return model
|
peterzpy/ACL-DGReID
|
fastreid/modeling/backbones/meta_dynamic_router_resnet.py
|
meta_dynamic_router_resnet.py
|
py
| 29,474 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.autograd.variable.Variable",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.autograd.variable.Variable",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "torch.nn.Module",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.IBN",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "fastreid.layers.get_norm",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.get_norm",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.SELayer",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn.Identity",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaLinear",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaLinear",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaIBNNorm",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.SELayer",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.nn.Identity",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaINNorm",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaIBNNorm",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.SELayer",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "torch.nn.Identity",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaLinear",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaLinear",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaLinear",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "torch.einsum",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaGate",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaGate",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaGate",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaGate",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaBNNorm",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.Non_local",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.Non_local",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.Non_local",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "fastreid.layers.Non_local",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "fastreid.modeling.ops.MetaConv2d",
"line_number": 490,
"usage_type": "argument"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 492,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 493,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 494,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 494,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 495,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 495,
"usage_type": "name"
},
{
"api_name": "os.path.expanduser",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 511,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 514,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 522,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "errno.EEXIST",
"line_number": 526,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 535,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "fastreid.utils.comm.is_main_process",
"line_number": 539,
"usage_type": "call"
},
{
"api_name": "fastreid.utils.comm",
"line_number": 539,
"usage_type": "name"
},
{
"api_name": "gdown.download",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "fastreid.utils.comm.synchronize",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "fastreid.utils.comm",
"line_number": 542,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 545,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 645,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 645,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 666,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 675,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 688,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 697,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 710,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.avg_pool1d",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "fastreid.utils.checkpoint.get_missing_parameters_message",
"line_number": 732,
"usage_type": "call"
},
{
"api_name": "fastreid.utils.checkpoint.get_unexpected_parameters_message",
"line_number": 736,
"usage_type": "call"
},
{
"api_name": "build.BACKBONE_REGISTRY.register",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "build.BACKBONE_REGISTRY",
"line_number": 552,
"usage_type": "name"
}
] |
71578007227
|
import h5py
import os
from torch.utils.data import Dataset
from DVS_dataload.my_transforms import *
from PIL import Image
import torch
import numpy as np
class DVSGestureDataset(Dataset):
def __init__(self, root, train=True, transform=None):
super(DVSGestureDataset, self).__init__()
self.n = 0
self.root = root
self.train = train
self.transform = transform
if train:
root_train = os.path.join(self.root, 'DvsGesture_train_40step_downsample')
for _, _, self.files_train in os.walk(root_train):
pass
self.n = len(self.files_train)
else:
root_test = os.path.join(self.root, 'DvsGesture_test_40step_downsample')
for _, _, self.files_test in os.walk(root_test):
pass
self.n = len(self.files_test)
def __len__(self):
return self.n
def __getitem__(self, idx):
if self.train:
root_test = os.path.join(self.root, 'DvsGesture_train_40step_downsample')
with h5py.File(root_test + os.sep + self.files_train[idx], 'r', swmr=True, libver="latest") as f:
target = f['label'][()]
data = f['data'][()]
if self.transform is not None:
data = self.transform(data)
return data, target
else:
root_test = os.path.join(self.root, 'DvsGesture_test_40step_downsample')
with h5py.File(root_test + os.sep + self.files_test[idx], 'r', swmr=True, libver="latest") as f:
target = f['label'][()]
data = f['data'][()]
if self.transform is not None:
data = self.transform(data)
return data, target
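# Minimal usage sketch (assumption: the pre-processed HDF5 files exist under
# <root>/DvsGesture_train_40step_downsample and <root>/DvsGesture_test_40step_downsample):
#   from torch.utils.data import DataLoader
#   train_set = DVSGestureDataset('/path/to/DvsGesture', train=True)
#   loader = DataLoader(train_set, batch_size=16, shuffle=True)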
|
langfengQ/MLF-DSResNet
|
DVS_dataload/DVS_Gesture_dataset.py
|
DVS_Gesture_dataset.py
|
py
| 1,762 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 46,
"usage_type": "attribute"
}
] |
6020834834
|
import os
import cv2
import shutil
source_path = 'test_copy_image'
des_path = 'train_image_label'
def get_all_label_file_to_image_file():
list_file = os.listdir(source_path)
list_label = [file for file in list_file if file.endswith('.txt')]
return list_label
def copy_image_according_to_label():
label_name = get_all_label_file_to_image_file()
print('There are {} label files'.format(len(label_name)))
print(label_name)
# copy files
for name in label_name:
# copy text file
orig_label = os.path.join(source_path, name)
des_label = os.path.join(des_path, name)
print(des_label)
shutil.copy(orig_label, des_label)
        # copy image file
        img_name = name.split('.')[0] + '.jpg'
        orig_img = os.path.join(source_path, img_name)
        des_img = os.path.join(des_path, img_name)
img = cv2.imread(orig_img)
cv2.imwrite(des_img, img)
copy_image_according_to_label()
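# Assumed layout: source_path holds YOLO pairs such as 0001.jpg + 0001.txt, and
# each labelled pair ends up in des_path. Note that cv2.imread/cv2.imwrite
# re-encodes the JPEG; shutil.copy(orig_img, des_img) would preserve the
# original image bytes.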
|
hluong89/calc_bounding_box_YOLO
|
copy_image_according_to_labels.py
|
copy_image_according_to_labels.py
|
py
| 1,014 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 33,
"usage_type": "call"
}
] |
70075665148
|
#!/usr/bin/env python3
"""
Cache class. In the __init__ method, store an instance of the Redis
client as a private variable named _redis (using redis.Redis()) and
flush the instance using flushdb.
"""
import redis
from typing import Union, Optional, Callable
from uuid import uuid4
from functools import wraps
def count_calls(method: Callable) -> Callable:
"""
count how many times the function was called
"""
key = method.__qualname__
@wraps(method)
def wrapper(self, *args, **kwargs):
""" Wrapper for decorator functionality """
self._redis.incr(key)
return method(self, *args, **kwargs)
return wrapper
def call_history(method: Callable) -> Callable:
"""
store the history of inputs and outputs for a particular function.
"""
inputs = '{}:inputs'.format(method.__qualname__)
outputs = '{}:outputs'.format(method.__qualname__)
@wraps(method)
def wrapper(self, *args, **kwds):
"""wrapper function"""
self._redis.rpush(inputs, str(args))
output = method(self, *args, **kwds)
self._redis.rpush(outputs, output)
return output
return wrapper
def replay(methodName: Callable) -> None:
"""
Display the history of calls of a function
"""
_redis = redis.Redis()
try:
calls = _redis.get(methodName.__qualname__).decode('utf-8')
except Exception:
calls = 0
print(f'{methodName.__qualname__} was called {calls} times:')
inputs = _redis.lrange(methodName.__qualname__ + ':inputs', 0, -1)
outputs = _redis.lrange(methodName.__qualname__ + ':outputs', 0, -1)
for input, output in zip(inputs, outputs):
print('{}(*{}) -> {}'.format(methodName.__qualname__,
input.decode('utf-8'),
output.decode('utf-8')))
class Cache():
"""
Main class for cache
"""
def __init__(self):
""" CONSTRUCTOR"""
self._redis = redis.Redis()
self._redis.flushdb()
@call_history
@count_calls
def store(self, data: Union[str, bytes, int, float]) -> str:
"""
        Takes data as an argument and stores it in a Redis db.
Remember that data can be a str, bytes, int or float.
Returns: A key in string format
"""
        _randomkey = str(uuid4())
self._redis.set(_randomkey, data)
return _randomkey
def get(self, key: str,
fn: Optional[Callable] = None) -> Union[str, bytes, int, float]:
""" Get the values as specific func"""
data = self._redis.get(key)
if fn:
return fn(data)
return data
    def get_int(self, data: int) -> int:
        """ Get the value as an int """
        try:
            return int(data)
        except Exception:
            return 0
    def get_str(self, data: str) -> str:
        """ Get the value as a str """
        return self._redis.get(data).decode('utf-8')
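# Minimal usage sketch (requires a Redis server on localhost:6379):
#   cache = Cache()
#   key = cache.store(42)
#   cache.get(key, fn=int)  # -> 42
#   replay(cache.store)     # prints the call history kept by the decorators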
|
lemejiamo/holbertonschool-backend-storage
|
0x02-redis_basic/exercise.py
|
exercise.py
|
py
| 3,014 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "typing.Callable",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "redis.Redis",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 95,
"usage_type": "name"
}
] |
32715528590
|
#!/usr/bin/env python3
__requires__ = '''
plover>=4.0.0.dev2
setuptools>=30.3.0
'''
from setuptools import setup
from plover_build_utils.setup import BuildPy, BuildUi
BuildPy.build_dependencies.append('build_ui')
BuildUi.hooks = ['plover_build_utils.pyqt:fix_icons']
cmdclass = {
'build_py': BuildPy,
'build_ui': BuildUi,
}
setup(cmdclass=cmdclass)
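# Note (added): appending 'build_ui' to BuildPy.build_dependencies is what
# causes `build_py` to compile the Qt .ui files first, with the fix_icons
# hook applied to the generated modules (an assumption from plover_build_utils
# conventions, not verified here).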
|
morinted/plover_layout_display
|
setup.py
|
setup.py
|
py
| 362 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "plover_build_utils.setup.BuildPy.build_dependencies.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plover_build_utils.setup.BuildPy.build_dependencies",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "plover_build_utils.setup.BuildPy",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "plover_build_utils.setup.BuildUi.hooks",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "plover_build_utils.setup.BuildUi",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "plover_build_utils.setup.BuildPy",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "plover_build_utils.setup.BuildUi",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "setuptools.setup",
"line_number": 19,
"usage_type": "call"
}
] |
16438405840
|
# -*- coding: utf-8 -*-
"""
# @file name : target.py
# @author : chenzhanpeng https://github.com/chenzpstar
# @date : 2022-01-09
# @brief : FCOS training target class
"""
import torch
import torch.nn as nn
from models.config import FCOSConfig
from models.utils import coords2centers, coords2offsets, decode_coords, reshape_feat
class FCOSTarget(nn.Module):
def __init__(self, cfg=None):
super(FCOSTarget, self).__init__()
if cfg is None:
self.cfg = FCOSConfig
else:
self.cfg = cfg
self.strides = self.cfg.strides
self.ranges = self.cfg.ranges
assert len(self.strides) == len(self.ranges)
def forward(self, feats, labels, boxes):
stages_num = len(self.strides)
assert len(feats) == stages_num
cls_targets = []
reg_targets = []
ctr_targets = []
for i in range(stages_num):
stage_targets = self._gen_stage_targets(
feats[i],
labels,
boxes,
self.strides[i],
self.ranges[i],
)
cls_targets.append(stage_targets[0])
reg_targets.append(stage_targets[1])
ctr_targets.append(stage_targets[2])
return cls_targets, reg_targets, ctr_targets
def _gen_stage_targets(self,
feat,
labels,
boxes,
stride,
                           stage_range,
sample_radio=1.5):
coords = decode_coords(feat, stride).to(device=boxes.device)
feat = reshape_feat(feat) # bchw -> b(hw)c
batch_size, hw = feat.shape[:2] # b(hw)c
boxes_num = boxes.shape[1] # bnc
        # 1. Compute the offsets from each coordinate to the four sides of all GT boxes
offsets = coords2offsets(coords, boxes)
assert offsets.shape == (batch_size, hw, boxes_num, 4)
offsets_min = offsets.min(dim=-1)[0]
offsets_max = offsets.max(dim=-1)[0]
boxes_mask = offsets_min > 0
        stage_mask = (offsets_max > stage_range[0]) & (offsets_max <= stage_range[1])
        # 2. Compute the offsets from each coordinate to the centers of all GT boxes
ctr_offsets = coords2centers(coords, boxes)
assert ctr_offsets.shape == (batch_size, hw, boxes_num, 4)
radius = sample_radio * stride
ctr_offsets_max = ctr_offsets.max(dim=-1)[0]
ctr_mask = ctr_offsets_max <= radius
pos_mask = boxes_mask & stage_mask & ctr_mask
assert pos_mask.shape == (batch_size, hw, boxes_num)
        # 3. Compute the areas of all GT boxes
areas = (offsets[..., 0] + offsets[..., 2]) * (offsets[..., 1] +
offsets[..., 3])
areas[~pos_mask] = 999999 # neg_areas
areas_min_idx = areas.min(dim=-1)[1].unsqueeze(dim=-1)
areas_min_mask = torch.zeros_like(areas, dtype=torch.bool).scatter(
-1, areas_min_idx, 1)
assert areas_min_mask.shape == (batch_size, hw, boxes_num)
        # 4. Compute classification targets
labels = torch.broadcast_tensors(
labels[:, None, :], areas.long())[0] # [b,1,n] -> [b,h*w,n]
cls_targets = labels[areas_min_mask].reshape((batch_size, -1, 1))
assert cls_targets.shape == (batch_size, hw, 1)
        # 5. Compute regression targets
offsets = offsets / stride
reg_targets = offsets[areas_min_mask].reshape((batch_size, -1, 4))
assert reg_targets.shape == (batch_size, hw, 4)
        # 6. Compute centerness targets
lr_min = torch.min(reg_targets[..., 0], reg_targets[..., 2])
lr_max = torch.max(reg_targets[..., 0], reg_targets[..., 2])
tb_min = torch.min(reg_targets[..., 1], reg_targets[..., 3])
tb_max = torch.max(reg_targets[..., 1], reg_targets[..., 3])
ctr_targets = ((lr_min * tb_min) /
(lr_max * tb_max).clamp(min=1e-10)).sqrt().unsqueeze(
dim=-1)
assert ctr_targets.shape == (batch_size, hw, 1)
        # 7. Handle negative samples
pos_mask = pos_mask.long().sum(dim=-1)
pos_mask = pos_mask >= 1
assert pos_mask.shape == (batch_size, hw)
cls_targets[~pos_mask] = 0
reg_targets[~pos_mask] = -1
ctr_targets[~pos_mask] = -1
return cls_targets, reg_targets, ctr_targets
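# Note (added): the centerness target above implements the FCOS definition
# centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))),
# computed from the per-location l, t, r, b regression targets.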
if __name__ == "__main__":
import torch
torch.manual_seed(0)
model = FCOSTarget()
preds = (
[torch.rand(2, 3, 2, 2)] * 5,
[torch.rand(2, 4, 2, 2)] * 5,
[torch.rand(2, 1, 2, 2)] * 5,
)
labels = torch.rand(2, 3)
boxes = torch.rand(2, 3, 4)
out = model(preds[0], labels, boxes)
[print(stage_out.shape) for branch_out in out for stage_out in branch_out]
|
ydlam/Fcos-main
|
models/target.py
|
target.py
|
py
| 4,816 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.config.FCOSConfig",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "models.utils.decode_coords",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.utils.reshape_feat",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.utils.coords2offsets",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "models.utils.coords2centers",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.bool",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "torch.broadcast_tensors",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 138,
"usage_type": "call"
}
] |
41364868505
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 11:56:58 2019
@author: saransh
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans,AgglomerativeClustering,DBSCAN
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn import metrics
from scipy.optimize import linear_sum_assignment
def purity_score(y_true, y_pred):
contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)
row_ind, col_ind = linear_sum_assignment(-contingency_matrix)
return contingency_matrix[row_ind, col_ind].sum() / np.sum(contingency_matrix)
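# Worked example (added): for y_true = [0, 0, 1, 1] and y_pred = [1, 1, 0, 0]
# the contingency matrix is [[0, 2], [2, 0]]; linear_sum_assignment relabels
# the clusters, so the purity score is (2 + 2) / 4 = 1.0.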
csv = pd.read_csv("Iris.csv")
datay = csv["Species"]
data = csv.drop(columns = ["Species"],axis = 1)
#pca = PCA(n_components=2).fit(data)
data = PCA(n_components=2).fit_transform(data)
data=pd.DataFrame(data)
print("_________________")
print(" ")
print("2D-Points After Reducing Dimensions ")
print("_________________")
plt.scatter(data.iloc[:,0],data.iloc[:,1],color="blue")
plt.show()
Kmean = KMeans(n_clusters=3)
labels = Kmean.fit_predict(data)
#labels = Kmean.predict(data)
print("_________________")
print(" ")
print("KMeans Clustering")
print("_________________")
plt.scatter(data.iloc[:,0],data.iloc[:,1],c=Kmean.labels_,cmap='viridis')
centers=Kmean.cluster_centers_
plt.scatter(centers[:,0],centers[:,1],s=100,c='black')
plt.show()
print("purity score for KMeans Clustering is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labels)))
print("_________________")
print(" ")
print(" ")
print(" ")
print("_________________")
print(" ")
print("Agglomerative Clustering")
print("_________________")
cluster = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='average')
labelag=cluster.fit_predict(data)
plt.scatter(data.iloc[:,0],data.iloc[:,1], c=cluster.labels_, cmap='rainbow')
plt.show()
print("purity score for Agglomerative Clustering is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labelag)))
print("_________________")
print(" ")
print(" ")
print("_________________")
print(" ")
print("DBSCAN")
print("_________________")
epsp=[0.05,0.5,0.95]
min_samplesp=[1,5,10,20]
ps=[]
arr = []
for i in epsp:
for j in min_samplesp:
db = DBSCAN(eps = i, min_samples = j)
arr.append([i,j])
labels1 = db.fit_predict(data)
ps.append([purity_score(pd.DataFrame(datay),pd.DataFrame(labels1)),i,j])
psmax=max(ps)
ind = ps.index(psmax)
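# Note (added): each ps entry is [score, eps, min_samples], so max() compares
# lexicographically; the highest purity wins and ties break toward larger eps.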
print('for eps = 0.05 and minpts = 1')
db = DBSCAN(eps = 0.05, min_samples = 1).fit(data)
labels1 = db.labels_
plt.scatter(data.iloc[:,0],data.iloc[:,1], c=db.labels_)
plt.show()
db = DBSCAN(eps = arr[ind][0], min_samples = arr[ind][1]).fit(data)
labels1 = db.labels_
plt.scatter(data.iloc[:,0],data.iloc[:,1], c=db.labels_, cmap='rainbow')
plt.show()
print("purity score for DBSAN is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labels1)))
print("_________________")
|
Saransh0905/Data-Science-3
|
agglomerativeClustering and DBSCAN/lab11.py
|
lab11.py
|
py
| 3,419 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sklearn.metrics.cluster.contingency_matrix",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.cluster",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sklearn.metrics",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "scipy.optimize.linear_sum_assignment",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.AgglomerativeClustering",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.DBSCAN",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.DBSCAN",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "sklearn.cluster.DBSCAN",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 120,
"usage_type": "call"
}
] |
74743616826
|
from PyQt4.QtGui import QApplication, QFrame, QPalette, QLabel, QPixmap
from PyQt4.QtCore import Qt, QRect, QCoreApplication
class BareFrame(QFrame):
def __init__(self, parent=None):
super(BareFrame, self).__init__(parent)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setFrameShadow(QFrame.Plain)
self.setFrameShape(QFrame.Box)
self.setLineWidth(1)
def setBGColor(self, color):
pal = QCoreApplication.instance().palette()
pal.setColor(QPalette.Window, color)
self.setPalette(pal)
class CloseMark(QLabel):
WIDTH = 31
HEIGHT = 31
MARGIN = 10
def __init__(self, pixmap, parent=None):
super(CloseMark, self).__init__(parent)
self.setPixmap(pixmap)
self.toParentTopRight()
def mousePressEvent(self, event):
self.parent().close()
def toParentTopRight(self):
parent = self.parent()
x = parent.width() - CloseMark.MARGIN - CloseMark.WIDTH
y = CloseMark.MARGIN
w = CloseMark.WIDTH
h = CloseMark.HEIGHT
self.setGeometry(QRect(x, y, w, h))
class Dashboard(BareFrame):
SCALE = .85
def __init__(self, parent=None):
from camelot.view.controls.busy_widget import BusyWidget
from camelot.view.model_thread import get_model_thread
super(Dashboard, self).__init__(parent)
desktop = QCoreApplication.instance().desktop()
        self.resize(int(desktop.width() * Dashboard.SCALE), int(desktop.height() * Dashboard.SCALE))
self.closemark = CloseMark(QPixmap('close-mark.png'), self)
self.setBGColor(Qt.white)
busy_widget = BusyWidget(self)
        busy_widget.setMinimumSize(int(desktop.width() * Dashboard.SCALE), int(desktop.height() * Dashboard.SCALE))
#self.addPermanentWidget(busy_widget, 0)
mt = get_model_thread()
mt.thread_busy_signal.connect( busy_widget.set_busy )
busy_widget.set_busy(mt.busy())
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
board = Dashboard()
board.show()
sys.exit(app.exec_())
|
kurtraschke/camelot
|
camelot/view/controls/dashboard.py
|
dashboard.py
|
py
| 2,127 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "PyQt4.QtGui.QFrame",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtCore.Qt.FramelessWindowHint",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtCore.Qt",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QFrame.Plain",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui.QFrame",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QFrame.Box",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui.QFrame",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtCore.QCoreApplication.instance",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtCore.QCoreApplication",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QPalette.Window",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui.QPalette",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtCore.QRect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtCore.QCoreApplication.instance",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtCore.QCoreApplication",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "{'BusyWidget': 'camelot.view.controls.busy_widget.BusyWidget', 'get_model_thread': 'camelot.view.model_thread.get_model_thread'}.SCALE",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui.QPixmap",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtCore.Qt.white",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtCore.Qt",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "camelot.view.controls.busy_widget.BusyWidget",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "{'BusyWidget': 'camelot.view.controls.busy_widget.BusyWidget', 'get_model_thread': 'camelot.view.model_thread.get_model_thread'}.SCALE",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "camelot.view.model_thread.get_model_thread",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QApplication",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "{'BusyWidget': 'camelot.view.controls.busy_widget.BusyWidget', 'get_model_thread': 'camelot.view.model_thread.get_model_thread'}",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 69,
"usage_type": "call"
}
] |
32506946053
|
import fcntl
import logging
import socket
import struct
import urllib.request
from urllib.parse import urlparse
from xml.dom import minidom
from functools import wraps
import urllib.error
import xml
SIOCGIFINDEX = 0x8933 # Get interface index
logger = logging.getLogger(__name__)
class NotRetrievedError(Exception):
"""Custom exception for objects that have not been retrieved
Custom object not retrieved exception class. Raised whenever a certain
property for a device or service was not retrieved.
"""
pass
class NotAvailableError(Exception):
"""Custom exception for when a certain URL could not be retrieved
Custom element not retrieved exception class. Raised whenever a value
needed to be accessed could not be retrieved from the URL.
"""
pass
def parse_http_header(header, header_key):
"""Parse HTTP header value
Parse the value of a specific header from a RAW HTTP response.
:param header: String containing the RAW HTTP response and headers
:type header: str
:param header_key: The header name of which to extract a value from
:type header_key: str
:return: The value of the header
:rtype: str
"""
split_headers = header.split('\r\n')
for entry in split_headers:
header = entry.strip().split(':', 1)
if header[0].strip().lower() == header_key.strip().lower():
return ''.join(header[1::]).split()[0]
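# Example (added): for a discovery response such as
#   'HTTP/1.1 200 OK\r\nLOCATION: http://192.168.0.1:80/desc.xml\r\n'
# parse_http_header(response, 'Location') returns
# 'http://192.168.0.1:80/desc.xml'; header names match case-insensitively.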
def make_http_request(url, data=None, headers=None):
"""Helper function for making HTTP requests
Helper function for making HTTP requests using urllib.
:param url: The URL to which a request should be made
:type url: str
:param data: Provide data for the request. Request method will be set to
POST if data is provided
:type data: str
:param headers: Provide headers to send with the request
:type headers: dict
    :return: The response object returned by urllib.request.urlopen
    :rtype: http.client.HTTPResponse
"""
if not headers:
headers = {}
# If data is provided the request method will automatically be set to POST
# by urllib
request = urllib.request.Request(url, data=data, headers=headers)
return urllib.request.urlopen(request)
def _device_description_required(func):
"""Decorator for checking whether the device description is available on a
device.
"""
@wraps(func)
def wrapper(device, *args, **kwargs):
if device.description is None:
raise NotRetrievedError(
'No device description retrieved for this device.')
elif device.description == NotAvailableError:
return
return func(device, *args, **kwargs)
return wrapper
def _get_if_index(ifname, fd):
"""Get the index corresponding to interface ifname used by socket fd"""
ifreq = struct.pack('16si', ifname.encode(), 0)
res = fcntl.ioctl(fd, SIOCGIFINDEX, ifreq)
return int(struct.unpack('16si', res)[1])
class SSDPDevice:
"""Represents an SSDP device
Object for representing an SSDP device.
:param address: SSDP device address
:type address: tuple
:param response: Device discovery response data
:type response: str
"""
def __init__(self, address, response):
self.address = address
self.host = address[0]
self.port = address[1]
self.response = response
self.description = None
self.friendly_name = None
self.type_ = None
self.base_url = None
self._get_description_request(parse_http_header(response, 'Location'))
self._get_friendly_name_request()
self._get_type_request()
self._get_base_url_request()
def get_friendly_name(self):
"""Get the friendly name for the device
Gets the device's friendly name
:return: Friendly name of the device
:rtype: str
"""
return self.friendly_name
def _get_description_request(self, url):
try:
device_description = make_http_request(url).read()
self.description = device_description
return device_description.decode()
except (urllib.error.HTTPError, urllib.error.URLError):
self.description = NotAvailableError
return None
@_device_description_required
def _get_friendly_name_request(self):
root = minidom.parseString(self.description)
device_friendly_name = root.getElementsByTagName(
'friendlyName')[0].firstChild.nodeValue
self.friendly_name = device_friendly_name
return self.friendly_name
@_device_description_required
def _get_type_request(self):
root = minidom.parseString(self.description)
device_type = root.getElementsByTagName(
'deviceType')[0].firstChild.nodeValue
self.type_ = device_type
return self.type_
@_device_description_required
def _get_base_url_request(self):
location_header_value = parse_http_header(self.response, 'Location')
header_url = urlparse(location_header_value)
root = minidom.parseString(self.description)
try:
parsed_url = urlparse(
root.getElementsByTagName('URLBase')[0].firstChild.nodeValue)
if parsed_url.port is not None:
base_url = '{}://{}'.format(parsed_url.scheme,
parsed_url.netloc)
else:
base_url = '{}://{}:{}'.format(parsed_url.scheme,
parsed_url.netloc,
header_url.port)
except (IndexError, AttributeError):
base_url = '{}://{}'.format(header_url.scheme, header_url.netloc)
self.base_url = base_url
return base_url
class SSDPHeader:
def __init__(self, **headers):
"""
Example M-SEARCH header:
------------------------------------------------------------------------
M-SEARCH * HTTP/1.1 SSDP method for search requests
HOST: 239.255.255.250:1900 SSDP multicast address and port (REQUIRED)
MAN: "ssdp:discover" HTTP Extension Framework scope (REQUIRED)
MX: 2 Maximum wait time in seconds (REQUIRED)
ST: upnp:rootdevice Search target (REQUIRED)
------------------------------------------------------------------------
"""
self.headers = {}
self.set_headers(**headers)
self._available_methods = ['M-SEARCH']
self.method = None
self.host = self.headers.get('HOST')
self.man = self.headers.get('MAN')
self.mx = self.headers.get('MX')
self.st = self.headers.get('ST')
def _check_method_required_params(self):
if self.method == 'M-SEARCH':
# M-SEARCH required parameters: HOST, MAN, MX, ST
if None in [self.host, self.man, self.mx, self.st]:
raise ValueError(
'M-SEARCH method requires HOST, MAN, MX and ST headers '
'to be set.')
def set_method(self, method):
method = method.upper()
if method in self._available_methods:
self.method = method.upper()
else:
            raise ValueError('Method must be either ' +
                             ' or '.join(self._available_methods))
def set_header(self, name, value):
self.headers[name.upper()] = value
def set_headers(self, **headers):
for key, value in headers.items():
self.set_header(key.upper(), value)
class SSDPRequest(SSDPHeader):
"""Create and perform an SSDP request
:param method: SSDP request method [M-SEARCH]
"""
def __init__(self,
ssdp_mcast_addr='239.255.255.250',
ssdp_port=1900,
src_port=None,
interface=None,
**headers):
super().__init__(**headers)
self.SSDP_MCAST_ADDR = ssdp_mcast_addr
self.SSDP_PORT = ssdp_port
self.set_header('HOST', "{}:{}".format(self.SSDP_MCAST_ADDR,
self.SSDP_PORT))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if (src_port is not None):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', src_port))
if (interface is not None):
try:
ifindex = _get_if_index(interface, self.socket.fileno())
except OSError:
logger.error(
"Could not find interface \"{}\"".format(interface))
return
except ValueError:
logger.error(
"Failed to parse the index of interface \"{}\"".format(
interface))
return
ip_mreqn = struct.pack(
'4s4si',
socket.inet_aton(ssdp_mcast_addr),
socket.inet_aton('0.0.0.0'), # INADDR_ANY
ifindex)
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
ip_mreqn)
def __del__(self):
self.socket.close()
def m_search(self, discover_delay=2, st='ssdp:all', **headers):
"""Perform an M-SEARCH SSDP request
Send an SSDP M-SEARCH request for finding UPnP devices on the network.
:param discover_delay: Device discovery delay in seconds
:type discover_delay: int
:param st: Specify device Search Target
:type st: str
:param headers: Specify M-SEARCH specific headers
:type headers: str
:return: List of device that replied
:rtype: list
"""
self.set_method('M-SEARCH')
self.set_header('MAN', '"ssdp:discover"')
self.set_header('MX', discover_delay)
self.set_header('ST', st)
self.set_headers(**headers)
self.socket.settimeout(discover_delay)
devices = self._send_request(self._get_raw_request())
for device in devices:
yield device
def _get_raw_request(self):
"""Get raw request data to send to server"""
final_request_data = ''
if self.method is not None:
ssdp_start_line = '{} * HTTP/1.1'.format(self.method)
else:
ssdp_start_line = 'HTTP/1.1 200 OK'
final_request_data += '{}\r\n'.format(ssdp_start_line)
for header, value in self.headers.items():
final_request_data += '{}: {}\r\n'.format(header, value)
final_request_data += '\r\n'
return final_request_data
def _send_request(self, message):
self.socket.sendto(message.encode(),
(self.SSDP_MCAST_ADDR, self.SSDP_PORT))
devices = []
try:
while True:
# UDP packet data limit is 65507 imposed by IPv4
# https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
response, addr = self.socket.recvfrom(65507)
try:
device = SSDPDevice(addr, response.decode())
except xml.parsers.expat.ExpatError:
continue
devices.append(device)
except socket.timeout:
pass
return devices
class UPnP:
"""UPnP object
A UPnP object used for device discovery
"""
def __init__(self, src_port=None, interface=None):
self.ssdp = SSDPRequest(src_port=src_port, interface=interface)
self.discovered_devices = []
def discover(self, delay=2, **headers):
"""Find UPnP devices on the network
Find available UPnP devices on the network by sending an M-SEARCH
request.
:param delay: Discovery delay, amount of time in seconds to wait for a
reply from devices
:type delay: int
:param headers: Optional headers for the request
:return: List of discovered devices
:rtype: list
"""
discovered_devices = []
for device in self.ssdp.m_search(discover_delay=delay,
st='upnp:rootdevice',
**headers):
discovered_devices.append(device)
self.discovered_devices = discovered_devices
return self.discovered_devices
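# Minimal discovery sketch (added for illustration; assumes a network with
# reachable UPnP devices, and prints nothing if none respond in time):
if __name__ == '__main__':
    upnp = UPnP()
    for device in upnp.discover(delay=2):
        print(device.get_friendly_name(), device.base_url)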
|
Blockstream/satellite
|
blocksatcli/upnp.py
|
upnp.py
|
py
| 12,542 |
python
|
en
|
code
| 949 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.Request",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "fcntl.ioctl",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "urllib.request.error",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_REUSEADDR",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "struct.pack",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "socket.inet_aton",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "socket.inet_aton",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "socket.IPPROTO_IP",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "socket.IP_MULTICAST_IF",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "xml.parsers",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "socket.timeout",
"line_number": 359,
"usage_type": "attribute"
}
] |
14629235583
|
import numpy as np
import time
import os
import sys
from scipy.stats import poisson, binom
from scipy.special import erf as erf
from admin import make_glob_array
import multiprocessing
# from Sim_show import Sim_fit
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import matplotlib.colors as mcolors
from make_3D import make_3D
pmts=np.array([0,1,4,6,7,3,10,13,15,17,18,5,11,12,14])
path='/home/gerak/Desktop/DireXeno/011220/'
data=np.load(path+'h.npz')
Angs=data['Angs']
Angs10=data['Angs10']
Angsbins=data['Angsbins']
H=data['H']
G=data['G']
Spectrum=data['spectrum']
Sbins=data['Sbins']
Spectra=np.transpose(data['spectra'], (2,0,1))
sbins=data['sbins']
FSbins=data['FSbins']
FullSpectrum=data['Fullspectrum']
w=data['W']
Wbins=data['Wbins']
Wbands=data['Wbands']
Sbands=data['Sbands']
W20_40=data['W20_40']
W20_40bins=data['W20_40bins']
data=np.loadtxt(path+'NRel.txt')
NDep=[]
NSpec=[]
for i in range(len(data)):
NDep.append(data[i][0])
NSpec.append(data[i][1])
NSpec=NSpec/np.sum(NSpec)
N=10
data=np.load('Q.npz')
ls=data['ls']
Sspectrum=data['Ravel_Spectrum'].reshape((N, 1, np.shape(Spectrum)[0]))[:,0]
Sspectra=data['Ravel_Spectra'].reshape((N, 1, np.shape(Spectra)[0], np.shape(Spectra)[1], np.shape(Spectra)[2]))[:,0]
SG=data['Ravel_G'].reshape((N, 1, np.shape(G)[0], np.shape(G)[1], np.shape(G)[2]))[:,0]
SH=data['Ravel_H'].reshape((N, 1, np.shape(H)[0], np.shape(H)[1], np.shape(H)[2], np.shape(H)[3]))[:,0]
SAngs=data['Ravel_Angs'].reshape((N, 1, np.shape(Angs)[0], np.shape(Angs)[1]))[:,0]
SFullspectrum=data['Ravel_Fullspectrum'].reshape((N, 1, np.shape(FullSpectrum)[1]))[:,0]
SW=data['Ravel_W'].reshape((N, 1, np.shape(w)[0]))[:,0]
keVbins=data['keVbins']
PEbins=data['PEbins']
keVPE=data['Ravel_KeVPE'].reshape((N, len(keVbins)-1, len(PEbins)-1))
y=np.arange(0, 600)
y1=data['adn']*y+data['bdn']
y2=data['aup']*y+data['bup']
plt.figure()
plt.title('Energy spectrum')
plt.step(NDep, NSpec, where='mid')
# plt.yscale('log')
plt.figure(figsize=(20,10))
X, Y= np.meshgrid(0.5*(keVbins[1:]+keVbins[:-1]), 0.5*(PEbins[1:]+PEbins[:-1]))
plt.pcolor(Y, X, np.mean(keVPE, axis=0), norm=mcolors.PowerNorm(0.3))
plt.plot(y, y1, 'k--', label='{}x+{}'.format(np.round(data['adn'],decimals=2), np.round(data['bdn'],decimals=2)), linewidth=5)
plt.plot(y, y2, 'k--', label='{}x+{}'.format(np.round(data['aup'],decimals=2), np.round(data['bup'],decimals=2)), linewidth=5)
plt.xlabel('PEs', fontsize=25)
plt.ylabel('keV', fontsize=25)
plt.tick_params(axis='both', which='major', labelsize=20)
cbar=plt.colorbar()
cbar.ax.tick_params(labelsize=25)
plt.legend(fontsize=35, loc='upper right')
plt.xlim(0, np.amax(PEbins))
plt.ylim(0, np.amax(keVbins))
plt.figure()
plt.title('W')
plt.bar(0.5*(Wbins[1:]+Wbins[:-1]), w, width=Wbins[1:]-Wbins[:-1], color='r', alpha=0.5)
plt.errorbar(0.5*(Wbins[1:]+Wbins[:-1]), np.mean(SW, axis=0), np.std(SW, axis=0), fmt='.')
for i in range(len(Wbands)):
plt.axvline(Wbands[i], 0, 1)
plt.yscale('log')
#
plt.figure()
plt.title('Full Spectrum')
plt.step(0.5*(FSbins[1:]+FSbins[:-1]), np.sum(FullSpectrum, axis=0), where='mid')
for i in range(len(Wbands)-1):
plt.bar(0.5*(FSbins[1:]+FSbins[:-1]), FullSpectrum[i], width=FSbins[1:]-FSbins[:-1], label='spectrum', alpha=0.5)
plt.errorbar(0.5*(FSbins[1:]+FSbins[:-1]), np.mean(SFullspectrum, axis=0), np.std(SFullspectrum, axis=0), fmt='.', label='A')
plt.yscale('log')
plt.legend()
plt.figure()
plt.title('Global Spectrum')
plt.bar(0.5*(Sbins[1:]+Sbins[:-1]), Spectrum, width=Sbins[1:]-Sbins[:-1], label='spectrum', color='r', alpha=0.5)
plt.errorbar(0.5*(Sbins[1:]+Sbins[:-1]), np.mean(Sspectrum, axis=0), np.std(Sspectrum, axis=0), fmt='.', label='A')
plt.legend()
plt.yscale('log')
t=np.arange(100)
fig, ax=plt.subplots(2,3)
for k in range(len(Sbands)-1):
#plt.title('The temporal structure in different energy ranges (NRs) {}-{}'.format(Sbands[k], Sbands[k+1]), fontsize=35)
data=np.ravel(G[k])
model=np.ravel(np.mean(SG[:,k], axis=0))
N=np.sum(np.sum(G[k].T*np.arange(np.shape(G)[1]), axis=1)/np.sum(G[k], axis=0))
np.ravel(ax)[k].step(t, (np.sum(G[k].T*np.arange(np.shape(G)[1]), axis=1)/np.sum(G[k], axis=0))/N, where='mid', label='PEs: {}-{}'.format(Sbands[k], Sbands[k+1]), linewidth=3)
np.ravel(ax)[k].errorbar(t, np.mean(np.sum(np.transpose(SG[:,k], (0,2,1))*np.arange(np.shape(G)[1]), axis=-1)/np.sum(SG[:,k], axis=1), axis=0)/N,
np.std(np.sum(np.transpose(SG[:,k], (0,2,1))*np.arange(np.shape(G)[1]), axis=-1)/np.sum(SG[:,k], axis=1), axis=0)/N, fmt='.', label='{}'.format(-np.sum(data*np.log((model+1e-10)/(data+1e-10))+data-model)))
np.ravel(ax)[k].legend(fontsize=10)
np.ravel(ax)[k].set_yscale('log')
# plt.xlabel('Time [ns]', fontsize=35)
# plt.ylabel('The probability to resolve a PE /\naveage number of PEs at\n the energy range', fontsize=35)
# plt.ylabel('The average number of\nPEs resolved (normalized)', fontsize=35)
# plt.tick_params(axis='both', which='major', labelsize=20)
# plt.show()
# for k in range(len(Sbands)-1):
# fig, ax=plt.subplots(4,4)
# fig.suptitle('PMT spectra ({}-{})'.format(Sbands[k], Sbands[k+1]))
# for i in range(15):
# np.ravel(ax)[i].step(0.5*(sbins[1:]+sbins[:-1]), Spectra[k,:,i], where='mid', label='A')
# np.ravel(ax)[i].errorbar(0.5*(sbins[1:]+sbins[:-1]), np.mean(Sspectra[:,k,:,i], axis=0), np.std(Sspectra[:,k,:,i], axis=0), fmt='.', label='A')
# np.ravel(ax)[i].legend()
# np.ravel(ax)[i].set_yscale('log')
# for k in range(len(Sbands)-1):
for k in range(1):
fig, ax=plt.subplots(4,4)
fig.suptitle('PMT temporal ({}-{})'.format(Sbands[k], Sbands[k+1]))
for i in range(15):
np.ravel(ax)[i].step(t, np.sum(H[k, :,:,i].T*np.arange(np.shape(H)[1]), axis=1)/np.sum(H[k, :,:,i], axis=0), where='mid', label='PMT{}'.format(pmts[i]))
# np.ravel(ax)[i].errorbar(t, np.mean(np.sum(np.transpose(SH[k,:,:,:,i], (0,2,1))*np.arange(np.shape(H)[1]), axis=-1)/np.sum(SH[k,:,:,:,i], axis=1), axis=0),
# np.std(np.sum(np.transpose(SH[k,:,:,:,i], (0,2,1))*np.arange(np.shape(H)[1]), axis=-1)/np.sum(SH[k,:,:,:,i], axis=1), axis=0), fmt='.')
np.ravel(ax)[i].set_yscale('log')
np.ravel(ax)[i].legend()
plt.show()
|
gerakolt/DireXeno
|
fit/show.py
|
show.py
|
py
| 6,250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.step",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "numpy.meshgrid",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pcolor",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.PowerNorm",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "numpy.round",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "numpy.round",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "numpy.amax",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "numpy.amax",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.step",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "numpy.ravel",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "numpy.ravel",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.ravel",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
}
] |
25068821679
|
import pygame
from math import *
import time
pygame.init()
pygame.display.set_caption("sprite sheet") # sets the window title
screen = pygame.display.set_mode((1000, 800)) # creates game screen
screen.fill((0,0,0))
clock = pygame.time.Clock() #set up clock
#Variables and stuff (Start)-------------------------------------------------------------------
red_pawn = pygame.image.load('RedPawn.png') #load your spritesheet
white_pawn = pygame.image.load('WhitePawn.png') #load your spritesheet
background = pygame.image.load('Chess board.png')#loads background
white_rook = pygame.image.load('WhiteRook.png')
red_rook = pygame.image.load('RedRook.png')
white_queen = pygame.image.load('WhiteQueen.png')
red_queen = pygame.image.load('RedQueen.png')
white_king = pygame.image.load('WhiteKing.png')
red_king = pygame.image.load('RedKing.png')
white_bishop = pygame.image.load('WhiteBishop.png')
red_bishop = pygame.image.load('RedBishop.png')
red_knight = pygame.image.load('RedKnight.png')
white_knight = pygame.image.load('WhiteKnight.png')
White=(255,255,255)
PIECEWIDTH = 100
checkmate = False #variable to run our game loop
whiteturn = True
hasPicked = False
clicked = False
TurnNum=0
ticks = 1
tickss = 1
t = 600
t2 = 600
pygame.display.set_caption('Show Text')
font = pygame.font.Font('freesansbold.ttf', 32)
#Variables and stuff (End)--------------------------------------------------------------------
#text to screen setup
#text input------------------------------------------
n1 = input('Enter your name player 1:')
n2 = input('Enter your name player 2:')
#End of text input----------------------------------
#Pieces start-----------------------------------------------
class pieces:
def __init__(self,xpos,ypos,type):
self.xpos = xpos
self.ypos = ypos
self.lifted = False
self.type = type
self.alive = True
self.row=0
self.column=0
def updatePos(self):
self.row = int(self.xpos/100)
self.column = int(self.ypos/100)
print("my row and column is ", self.row, self.column)
self.xpos = self.row*100
self.ypos = self.column*100
def isClicked(self,x,y):
global hasPicked
global TurnNum
print("checking distance")
if self.alive == True:
if hasPicked == False and self.lifted == False:
if (sqrt(((self.xpos+50)-x)**2 + ((self.ypos+50)-y)**2)) <= 50:#check if the distance b/t (x, y) and (self.xpos, self.ypos) is LESS than 50
self.lifted = True #if true, set lifted to true
hasPicked = True
TurnNum+=1
print("lifted!")
else:
print("no collision")
def move(self, x, y):
if self.lifted == True:#if islifed is true, set self.xpos to x, and self.ypos to y
self.xpos = x-50
self.ypos = y-50
def draw(self):
if self.alive == True:
#pawns
if self.type == "WhitePawn":
screen.blit(white_pawn, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedPawn":
screen.blit(red_pawn, (self.xpos, self.ypos), (0, 0, 100, 100))
#knights
if self.type == "WhiteKnight":
screen.blit(white_knight, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedKnight":
screen.blit(red_knight, (self.xpos, self.ypos), (0, 0, 100, 100))
#bishop
if self.type == "WhiteBishop":
screen.blit(white_bishop, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedBishop":
screen.blit(red_bishop, (self.xpos, self.ypos), (0, 0, 100, 100))
#rook
if self.type == "WhiteRook":
screen.blit(white_rook, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedRook":
screen.blit(red_rook, (self.xpos, self.ypos), (0, 0, 100, 100))
#king
if self.type == "WhiteKing":
screen.blit(white_king, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedKing":
screen.blit(red_king, (self.xpos, self.ypos), (0, 0, 100, 100))
#queen
if self.type == "WhiteQueen":
screen.blit(white_queen, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedQueen":
screen.blit(red_queen, (self.xpos, self.ypos), (0, 0, 100, 100))
#Pieces End-----------------------------------------------------------
#python list (like a vector)
whitelist = []
redlist = []
#White Pawns
WhitePawn1 = pieces(0, 100, "WhitePawn")
whitelist.append(WhitePawn1)
WhitePawn2 = pieces(100, 100, "WhitePawn")
whitelist.append(WhitePawn2)
WhitePawn3 = pieces(200, 100, "WhitePawn")
whitelist.append(WhitePawn3)
WhitePawn4 = pieces(300, 100, "WhitePawn")
whitelist.append(WhitePawn4)
WhitePawn5 = pieces(400, 100, "WhitePawn")
whitelist.append(WhitePawn5)
WhitePawn6 = pieces(500, 100, "WhitePawn")
whitelist.append(WhitePawn6)
WhitePawn7 = pieces(600, 100, "WhitePawn")
whitelist.append(WhitePawn7)
WhitePawn8 = pieces(700, 100, "WhitePawn")
whitelist.append(WhitePawn8)
#Red Pawns
RedPawn1 = pieces(0, 600, "RedPawn")
redlist.append(RedPawn1)
RedPawn2 = pieces(100, 600, "RedPawn")
redlist.append(RedPawn2)
RedPawn3 = pieces(200, 600, "RedPawn")
redlist.append(RedPawn3)
RedPawn4 = pieces(300, 600, "RedPawn")
redlist.append(RedPawn4)
RedPawn5 = pieces(400, 600, "RedPawn")
redlist.append(RedPawn5)
RedPawn6 = pieces(500, 600, "RedPawn")
redlist.append(RedPawn6)
RedPawn7 = pieces(600, 600, "RedPawn")
redlist.append(RedPawn7)
RedPawn8 = pieces(700, 600, "RedPawn")
redlist.append(RedPawn8)
#White Rooks
WhiteRook1 = pieces(0, 0, "WhiteRook")
whitelist.append(WhiteRook1)
WhiteRook2 = pieces(700, 0, "WhiteRook")
whitelist.append(WhiteRook2)
#Red Rooks
RedRook1 = pieces(0, 700, "RedRook")
redlist.append(RedRook1)
RedRook2 = pieces(700, 700, "RedRook")
redlist.append(RedRook2)
#White Knights
WhiteKnight1 = pieces(100, 0, "WhiteKnight")
whitelist.append(WhiteKnight1)
WhiteKnight2 = pieces(600, 0, "WhiteKnight")
whitelist.append(WhiteKnight2)
#Red Knights
RedKnight1 = pieces(100, 700, "RedKnight")
redlist.append(RedKnight1)
RedKnight2 = pieces(600, 700, "RedKnight")
redlist.append(RedKnight2)
#White Bishops
WhiteBishop1 = pieces(200, 0, "WhiteBishop")
whitelist.append(WhiteBishop1)
WhiteBishop2 = pieces(500, 0, "WhiteBishop")
whitelist.append(WhiteBishop2)
#Red Bishop
RedBishop1 = pieces(200, 700, "RedBishop")
redlist.append(RedBishop1)
RedBishop2 = pieces(500, 700, "RedBishop")
redlist.append(RedBishop2)
#White and Red King
WhiteKing = pieces(300, 0, "WhiteKing")
whitelist.append(WhiteKing)
RedKing = pieces(300, 700, "RedKing")
redlist.append(RedKing)
#White and Red Queen
WhiteQueen = pieces(400, 0, "WhiteQueen")
whitelist.append(WhiteQueen)
RedQueen = pieces(400, 700, "RedQueen")
redlist.append(RedQueen)
#MAP:
map = [[3, 4, 5, 6, 7, 5, 4, 3],
[2, 2, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[8, 9, 10, 11, 12, 10, 9, 8]]
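# Note (added): this board map is initialized but never read in the game loop
# below; presumably 0 marks an empty square and 1-12 encode the piece types
# placed above (an assumption, since the encoding is not documented).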
while not checkmate:
clock.tick(60)
#print("clicked is ", clicked)
for event in pygame.event.get():
if event.type == pygame.QUIT:
checkmate = True
        if event.type == pygame.MOUSEBUTTONDOWN:
            print("click")
            if clicked == False:
                clicked = True
            elif clicked == True:
                clicked = False
if event.type == pygame.MOUSEBUTTONUP:
print("unclick")
clicked = False
if event.type == pygame.MOUSEMOTION: #check if mouse moved
mousePos = event.pos #refreshes mouse position
# print("mouse position: (",mousePos[0]," , ",mousePos[1], ")")
#COLLISION
#White updatepos
for i in range(len(whitelist)):
whitelist[i].updatePos()
#red updatepos
for i in range(len(redlist)):
redlist[i].updatePos()
if clicked ==True:
if whiteturn is True:# if whiteturn is true white pieces go
#white clicked function
for i in range(len(whitelist)):
whitelist[i].isClicked(mousePos[0],mousePos[1])
if whiteturn is False: # if whiteturn is false red goes
#red clicked function
for i in range(len(redlist)):
redlist[i].isClicked(mousePos[0],mousePos[1])
if clicked == False:
hasPicked = False
#white pieces lifted set down
for i in range(len(whitelist)):
whitelist[i].lifted = False
#red piece lifted set down
for i in range(len(redlist)):
redlist[i].lifted = False
#White pieces move
for i in range(len(whitelist)):
whitelist[i].move(mousePos[0],mousePos[1])
#Red pieces move
for i in range(len(redlist)):
redlist[i].move(mousePos[0],mousePos[1])
#print to screen
if whiteturn == True:
ticks +=1
if ticks%60==0: #add a ticker variable here, add 1 to it, when it hits 60, decrement t and reset time
t-=1
        if t == 0:
            checkmate = True
if whiteturn == False:
tickss +=1
if tickss%60==0:
t2-=1
        if t2 == 0:
            checkmate = True
text = font.render(str(t), True, (0,255,0))
textRect = text.get_rect()
textRect.center = (900, 0)
text2 = font.render(str(t2), True, (0,255,0))
textRect2 = text2.get_rect()
textRect2.center = (900, 800)
Name = font.render(str(n1), True, (255,255,255))
name = Name.get_rect()
name.center = (900, 100)
Name2 = font.render(str(n2), True, (255,255,255))
name2 = Name2.get_rect()
name2.center = (900, 700)
for i in range(len(whitelist)):
for j in range(len(redlist)):
if whiteturn == False:
                if whitelist[i].row == redlist[j].row and whitelist[i].column == redlist[j].column:
redlist[j].alive = False
print(redlist[j].type, "got killed")
if whiteturn == True:
                if redlist[j].row == whitelist[i].row and redlist[j].column == whitelist[i].column:
whitelist[i].alive = False
print(whitelist[i].type, "got killed")
if WhiteKing.alive == False:
checkmate = True
if RedKing.alive == False:
checkmate = True
# RENDER--------------------------------------------------------------------------------
screen.fill((0,0,0)) #wipe screen so it doesn't smear
screen.blit(background, (0,0))
if TurnNum % 2==0:
whiteturn=True
print("white's turn")
else:
print("red's turn")
whiteturn = False
print("Turn Num:", TurnNum)
#draws white pieces
for i in range(len(whitelist)):
whitelist[i].draw()
#draws red pieces
for i in range(len(redlist)):
redlist[i].draw()
screen.blit(Name, (900,50))
screen.blit(Name2, (900,700))
screen.blit(text, (900, 100))
screen.blit(text2, (900, 750))
pygame.display.flip()#this actually puts the pixel on the screen
#end game loop------------------------------------------------------------------------------
pygame.quit()
|
richfls/chess
|
main9.py
|
main9.py
|
py
| 11,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONUP",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEMOTION",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 357,
"usage_type": "call"
}
] |
5432983759
|
import logging
import warnings
from typing import List, Tuple
import numpy as np
import pandas as pd
from anndata import AnnData
from mudata import MuData
from pandas.api.types import is_numeric_dtype
from sklearn.neighbors import KNeighborsClassifier
from ..utils import check_transition_rule, get_views_from_structure
from .MultiViewAtlas import MultiViewAtlas, _harmonize_mdata_full
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=UserWarning)
# logger = logging.getLogger(__name__)
# logger.propagate = False
# ch = RichHandler(level=logging.INFO, show_path=False, show_time=False)
# formatter = logging.Formatter("%(message)s")
# ch.setFormatter(formatter)
# logger.addHandler(ch)
def load_query(
mvatlas: MultiViewAtlas,
adata_query: AnnData,
) -> MultiViewAtlas:
"""Load query data to full view of multi-view atlas
    Parameters
    ----------
    mvatlas:
        MultiViewAtlas object
    adata_query:
        AnnData of query data
    Returns
    -------
MultiViewAtlas object with mapped query cells in full view
"""
# Define all current view - next view pairs for assignment
view_pairs = mvatlas.get_view_pairs()
# Check if query cells already in mdata
if adata_query.obs_names.isin(mvatlas.mdata.obs_names).all():
vdata_full = mvatlas.mdata["full"].copy()
else:
vdata_full = concatenate_query(mvatlas, adata_query, "full")
# Check that at least one mapping from full view is possible with transition rules
rules_from_full = view_pairs[view_pairs.depth == 0]["transition_rule"].unique()
missing_rules = []
for r in rules_from_full:
try:
if check_transition_rule(adata_query, r) is None:
continue
except ValueError:
missing_rules.append(r)
if len(rules_from_full) == len(missing_rules):
raise ValueError(
f"""
No mapping possible from full view. Please add info on rules {missing_rules} to query dataset
"""
)
mdata = mvatlas.mdata.copy()
mdata.mod["full"] = vdata_full.copy()
try:
mdata.update()
except KeyError:
mdata.update()
del mdata.obsm["view_assign"]
mvatlas_mapped = MultiViewAtlas(mdata, rename_obsm=False)
mvatlas_mapped.view_transition_rule = mvatlas.view_transition_rule.copy()
_harmonize_mdata_full(mvatlas_mapped)
return mvatlas_mapped
def split_query(
mvatlas_mapped: MultiViewAtlas,
) -> MultiViewAtlas:
"""Assign query data to views in atlas, based on transition rules.
    Parameters
    ----------
    mvatlas:
        MultiViewAtlas object
    Returns
    -------
MultiViewAtlas object with mapped query cells
"""
# Define all current view - next view pairs for assignment
view_pairs = mvatlas_mapped.get_view_pairs()
# Check that load_query was run
try:
if sum(mvatlas_mapped.mdata["full"].obs["dataset_group"] == "query") == 0:
raise ValueError("No query cells in mdata - run load_query first")
except KeyError:
raise ValueError("No query cells in mdata - run load_query first")
vdata_full = mvatlas_mapped.mdata["full"].copy()
new_view_assign = pd.DataFrame()
vdata_dict = {}
vdata_dict["full"] = vdata_full.copy()
for _, row in view_pairs.iterrows():
depth = row["depth"]
current_view = row["parent_view"]
next_view = row["child_view"]
try:
n_query_current = sum(vdata_dict[current_view].obs["dataset_group"] == "query")
except KeyError:
n_query_current = 0
try:
n_query_next = sum(mvatlas_mapped.mdata[next_view].obs["dataset_group"] == "query")
except KeyError:
n_query_next = 0
# if "dataset_group" in vdata_dict[current_view].obs:
if n_query_current > 0:
adata_query = vdata_dict[current_view][vdata_dict[current_view].obs["dataset_group"] == "query"].copy()
logging.info(f"Assigning to {next_view} from {current_view} with rule {row['transition_rule']}")
# print(adata_query)
# print(vdata_dict[current_view])
# print(mvatlas_mapped.mdata[current_view])
if n_query_next > 0:
logging.info(f"Query cells already in {next_view}")
v_assign = mvatlas_mapped.mdata.obsm["view_assign"][[next_view]]
vdata_dict[next_view] = mvatlas_mapped.mdata[next_view].copy()
else:
adata_query_concat = AnnData(obs=adata_query.obs, obsm=adata_query.obsm, obsp=adata_query.obsp)
if depth > 0:
adata_query_concat = adata_query_concat[
new_view_assign.loc[adata_query_concat.obs_names, current_view] == 1
].copy()
v_assign, next_view_adata = map_next_view(
mvatlas_mapped, adata_query_concat, current_view=current_view, next_view=next_view
)
vdata_dict[next_view] = next_view_adata.copy()
else:
logging.info(f"No query cells in {current_view}")
v_assign = mvatlas_mapped.mdata.obsm["view_assign"][[next_view]]
vdata_dict[next_view] = mvatlas_mapped.mdata[next_view].copy()
        new_view_assign = pd.concat([new_view_assign, v_assign], axis=1)
new_view_assign = new_view_assign.fillna(0)
vdata_full.obsm["view_assign"] = new_view_assign.copy()
vdata_full.uns["view_hierarchy"] = mvatlas_mapped.view_hierarchy
vdata_dict["full"] = vdata_full
mdata = MuData({v: vdata_dict[v] for v in get_views_from_structure(mvatlas_mapped.view_hierarchy)})
mdata.mod["full"] = mdata.mod["full"][mdata.obs_names].copy()
view_transition_rule = mvatlas_mapped.view_transition_rule.copy()
# trans_rule = pd.Series(mvatlas.view_transition_rule.values.ravel()).dropna().unique()[0]
mvatlas_mapped = MultiViewAtlas(mdata, rename_obsm=False, transition_rule=view_transition_rule)
# mvatlas_mapped.view_transition_rule = view_transition_rule.copy()
return mvatlas_mapped
def concatenate_query(
mvatlas: MultiViewAtlas,
adata_query: AnnData,
view: str,
batch_key: str = "dataset_group",
batch_categories: List[str] = None,
):
"""Concatenate query data to atlas data"""
if batch_categories is None:
batch_categories = ["atlas", "query"]
adata_atlas = mvatlas.mdata[view].copy()
assert np.intersect1d(adata_atlas.obs_names, adata_query.obs_names).size == 0
adata_atlas = adata_atlas.concatenate(
adata_query, batch_key=batch_key, batch_categories=batch_categories, index_unique=None, uns_merge="unique"
)
return adata_atlas
def assign_from_similarity(Y_assign_atlas, X_similarity_atlas, X_similarity_query, v_assign, k=10):
"""Assign query cells to atlas views based on similarity to atlas cells.
Similarity can be derived from metadata annotation or from latent space
"""
if not isinstance(v_assign, pd.DataFrame):
raise ValueError("v_assign must be a pandas DataFrame")
if not v_assign.shape[1] == 1:
raise ValueError("v_assign must have only one column")
clf = KNeighborsClassifier(n_neighbors=k, metric="euclidean").fit(X_similarity_atlas, Y_assign_atlas)
Y_assign_query = clf.predict(X_similarity_query)
assign_predict = pd.DataFrame(Y_assign_query, columns=v_assign.columns)
return assign_predict
def map_next_view(
mvatlas: MultiViewAtlas,
adata_query: AnnData,
current_view: str,
next_view: str,
batch_key: str = "dataset_group",
batch_categories: List[str] = None,
) -> Tuple[pd.DataFrame, AnnData]:
"""Assign query cells to next view based on similarity to atlas cells.
Similarity is defined by the transition rule.
"""
if batch_categories is None:
batch_categories = ["atlas", "query"]
# curr_view_adata = _concatenate_query(mvatlas, adata_query, current_view, batch_key=batch_key, batch_categories=batch_categories)
curr_view_adata = mvatlas.mdata[current_view].copy()
next_view_adata = mvatlas.mdata[next_view].copy()
# Get similarity from transition rule
if batch_key in curr_view_adata.obs.columns:
v_assign = mvatlas.mdata.obsm["view_assign"].loc[mvatlas.mdata["full"].obs[batch_key] == batch_categories[0]][
[next_view]
]
curr_view_adata = curr_view_adata[curr_view_adata.obs[batch_key] == batch_categories[0]].copy()
# next_view_adata = next_view_adata[next_view_adata.obs[batch_key] == batch_categories[0]].copy()
# assert "dataset_group" not in next_view_adata.obs.columns
else:
v_assign = mvatlas.mdata.obsm["view_assign"].loc[mvatlas.mdata["full"].obs[batch_key] == batch_categories[0]][
[next_view]
]
transition_rule = mvatlas.view_transition_rule[current_view][next_view]
if transition_rule is not None:
try:
check_transition_rule(adata_query, transition_rule)
except ValueError:
logging.warning(
f"Could not check transition rule {transition_rule} for query data - skipping mapping from {current_view} to {next_view}"
)
v_assign_query = pd.DataFrame(0, columns=[next_view], index=adata_query.obs_names)
v_assign = pd.concat([v_assign, v_assign_query], axis=0)
return v_assign, next_view_adata
# Split to next view based on transition rule
# if transition is in obs
if transition_rule in adata_query.obs.columns:
if is_numeric_dtype(adata_query.obs[transition_rule]):
X_similarity_atlas = curr_view_adata.obs[[transition_rule]].values
X_similarity_query = adata_query.obs[[transition_rule]].values
else:
X_similarity_atlas = pd.get_dummies(curr_view_adata.obs[transition_rule])
X_similarity_query = pd.get_dummies(adata_query.obs[transition_rule])
# Check for missing levels
missing_cols = [x for x in X_similarity_atlas.columns if x not in X_similarity_query.columns]
if len(missing_cols) > 0:
X_similarity_query[missing_cols] = 0
X_similarity_query = X_similarity_query[X_similarity_atlas.columns].copy()
X_similarity_atlas = X_similarity_atlas.values
X_similarity_query = X_similarity_query.values
    elif transition_rule in adata_query.obsm:
X_similarity_atlas = curr_view_adata.obsm[transition_rule]
X_similarity_query = adata_query.obsm[transition_rule]
# X_similarity = curr_view_adata.obsm[transition_rule]
else:
raise ValueError(f"No transition rule defined for {current_view} -> {next_view}")
# Get assignment to next view
Y_assign_atlas = v_assign.loc[curr_view_adata.obs_names].values.ravel()
v_assign_query = assign_from_similarity(Y_assign_atlas, X_similarity_atlas, X_similarity_query, v_assign)
v_assign_query.index = adata_query.obs_names
v_assign = pd.concat([v_assign, v_assign_query], axis=0)
next_view_adata = concatenate_query(
mvatlas,
adata_query[v_assign_query[next_view] == 1],
next_view,
batch_key=batch_key,
batch_categories=batch_categories,
)
return v_assign, next_view_adata
|
Teichlab/multi-view-atlas
|
src/multi_view_atlas/tl/map_query.py
|
map_query.py
|
py
| 11,557 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "warnings.simplefilter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "anndata.AnnData",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "utils.check_transition_rule",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas._harmonize_mdata_full",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "anndata.AnnData",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "mudata.MuData",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "utils.get_views_from_structure",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "anndata.AnnData",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "numpy.intersect1d",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "MultiViewAtlas.MultiViewAtlas",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "anndata.AnnData",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "utils.check_transition_rule",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "pandas.api.types.is_numeric_dtype",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "anndata.AnnData",
"line_number": 204,
"usage_type": "name"
}
] |
18390517391
|
import re
import scrapy
base_url = "https://www.senscritique.com/liste/Cine_2017_sur_un_coin_de_table/1562107"
class SC_Spider(scrapy.Spider):
name = "liste"
start_urls = [base_url]
summary_done = False
current_page = 1
def parse(self, response):
if not self.summary_done:
            list_title = re.sub(r'\s+', ' ', response.css('h1.d-heading1::text').extract_first())
list_desc = str.join('\n', response.css('div.d-grid')[2].css('p::text').extract())
yield {'list_title': list_title, 'list_desc': list_desc}
self.summary_done = True
list_fragment = response.css('div.elli-content')
for item in list_fragment:
title = item.css('a.elco-anchor::text').extract_first()
try:
comment = str.join('\n', item.css('div.elli-annotation-content::text').extract())
            except Exception:
comment = ''
yield {'title': title, 'comment': comment}
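        # Pagination workaround: the site renders at most 7 page links, so grab
        # a nearby anchor and rewrite its trailing page number; stop after page 12.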
try:
if self.current_page > 11:
raise Exception
self.current_page += 1
next_page = response.css('div.eipa-interface')[0].css('li.eipa-page')[min(self.current_page, 7)].css('a.eipa-anchor::attr(href)').extract_first()
idx = next_page.rfind('-')
next_page = next_page[:idx+1] + str(self.current_page)
        except Exception:
next_page = None
if next_page is not None:
yield response.follow(next_page, self.parse)
|
ria4/tln
|
util/old/crawlers/sc/liste_crawler.py
|
liste_crawler.py
|
py
| 1,503 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 14,
"usage_type": "call"
}
] |
2053824632
|
# USAGE
# python extract_features.py --dataset ../datasets/kaggle_dogs_vs_cats/train --output ../datasets/kaggle_dogs_vs_cats/hdf5/features.hdf5
from keras.applications import ResNet50, imagenet_utils
from keras.preprocessing.image import load_img, img_to_array
from sklearn.preprocessing import LabelEncoder
from pyimagesearch.io import HDF5DatasetWriter
from imutils import paths
import numpy as np
import progressbar
import os
import random
import argparse
# construct argument parser and parse the argument
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True, help='path to input dataset')
ap.add_argument('-o', '--output', required=True, help='path to output hdf5 file')
ap.add_argument('-b', '--buffer-size', type=int, default=1000, help='size of feature extraction buffer')
ap.add_argument('-s', '--batch-size', type=int, default=32, help='batch size of image to be passed through feature extractor')
args = vars(ap.parse_args())
# store the batch size in a convenience variable
batch_size = args['batch_size']
# grab the list of image paths and extract the labels from them
print('[INFO] loading images...')
image_paths = list(paths.list_images(args['dataset']))
random.shuffle(image_paths)
labels = [image_path.split(os.path.sep)[-1].split('.')[0] for image_path in image_paths]
le = LabelEncoder()
labels = le.fit_transform(labels)
# load model
print('[INFO] loading model...')
model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
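# with include_top=False and pooling='avg', ResNet50 outputs one 2048-d feature
# vector per image, matching the (len(labels), 2048) dataset shape below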
# initialize HDF5 dataset writer
dataset = HDF5DatasetWriter((len(labels), 2048), args['output'], data_key='features', buf_size=args['buffer_size'])
dataset.storeClassLabels(le.classes_)
# construct progress bar
widgets = ['Extracting features: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(labels), widgets=widgets).start()
# loop over batches of images
for i in range(0, len(labels), batch_size):
batch_paths = image_paths[i:i+batch_size]
batch_labels = labels[i:i+batch_size]
batch_images = []
# loop over images
for image_path in batch_paths:
        # load image and convert it to a Keras-compatible array
image = load_img(image_path, target_size=(224, 224))
image = img_to_array(image)
# add extra dimension
image = np.expand_dims(image, axis=0)
# preprocess by Resnet50
image = imagenet_utils.preprocess_input(image)
# add to batch
batch_images.append(image)
# pass batch through Resnet50
batch_images = np.vstack(batch_images)
features = model.predict(batch_images, batch_size=batch_size)
features = features.reshape((len(features), -1))
# add to hdf5 dataset
dataset.add(features, batch_labels)
pbar.update(i)
# close dataset
dataset.close()
pbar.finish()
|
lykhahaha/Mine
|
PractitionerBundle/chapter10-dogs_vs_cats/extract_features.py
|
extract_features.py
|
py
| 2,835 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "imutils.paths.list_images",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "imutils.paths",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.applications.ResNet50",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyimagesearch.io.HDF5DatasetWriter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "progressbar.Percentage",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "progressbar.Bar",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "progressbar.ETA",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "progressbar.ProgressBar",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.load_img",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "keras.applications.imagenet_utils.preprocess_input",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "keras.applications.imagenet_utils",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.vstack",
"line_number": 64,
"usage_type": "call"
}
] |
34776536653
|
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from ..filters.CRPFilter import CRPFilter
from ..forms.CRPForm import UpdateCRPForm, CRPTrackForm, AddCRPForm
from django.forms import modelformset_factory
from datetime import datetime, timedelta
from ..forms.ProductCenterForm import CapacitySchedulingFormCRP
from ..models.CRP import CRPTrack, CRP
from ..models.Finished import Finished
from ..models.OperationList import OperationList
from ..models.ProductCenter import ProductCenter, CapacityScheduling
from ..models.OperationSequence import OperationSequence
from django.contrib import messages
list_crp_props = {
"title": "CRP",
}
post_crp_props = {
"title": "Add CRP",
}
views_crp_props = {
"title": "View CRP",
}
update_crp_props = {
"title": "Update CRP",
'btn_name': 'Update'
}
copy_crp_props = {
"title": "Copy CRP",
'btn_name': 'Copy'
}
def ListCRP(request):
if request.method == "GET":
crp_track = CRPTrack.objects.all()
list_crp_props['crp_track'] = crp_track
list_crp_props['filters'] = CRPFilter()
return render(request, 'PP/planning/crp/list.html', context=list_crp_props)
def FilterCRP(request):
if request.method == 'POST':
filter = CRPFilter(request.POST, queryset=CRPTrack.objects.all())
if filter.is_valid():
if len(filter.qs.values()) != 0:
return JsonResponse({
'CRPTrack': list(filter.qs.values()),
'mass': 'success',
}, status=200)
else:
return JsonResponse({
'mass': 'error',
}, status=200)
else:
return JsonResponse({
'mass': 'error',
})
else:
return JsonResponse({
'mass': 'error',
})
def PostCRP(request):
if request.method == "POST":
finish = request.POST.get('finished_no')
order_ref = request.POST.get('order_ref')
order_qty = request.POST.get('order_qty')
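        # AJAX branch: build CRP preview rows from the finished product's
        # operation sequence and each production centre's latest
        # capacity-scheduling record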
if request.is_ajax():
finish = Finished.objects.get(id=finish)
opl = OperationList.objects.filter(id=finish.operations_list.id).values()
opsec = OperationSequence.objects.filter(operation_list=finish.operations_list).values()
crp = []
for ops in opsec:
pc = ProductCenter.objects.get(id=ops['production_center_id'])
cs = CapacityScheduling.objects.filter(ProductCenterId=ops['production_center_id']).last()
crp.append({
'operationSequence': ops['operationSequence'],
'productioncneter': pc.product_center_name,
'AvalStartDate': cs.Date,
'reqdcapunit': ops['reqdcapunit'],
'ReqdMcHrByUnit': ops['totaltime'],
'AvalStartTime': cs.EndTime,
'AvalMcHrOrDay': cs.BalMcOrHour,
'NoOfMCByResAval': int(pc.NoOfMByC) - int(cs.NoOfMCAlloctd),
'AvalCAPByDay': cs.BalanceCap,
})
crp_track_form = CRPTrackForm({'finished': finish,
'operationlist': finish.operations_list.id,
'product_description': finish.product_description,
'order_ref': order_ref,
'order_qty': order_qty
})
if crp_track_form.is_valid():
crp_track_form = crp_track_form.save()
return JsonResponse({
'mass': "success",
'operation_scheduling': list(opsec),
'crps': crp,
'CRPTrack': list(CRPTrack.objects.filter(id=crp_track_form.id).values())
}, status=200)
operationSequence = request.POST.getlist('operationSequence')
productioncneter = request.POST.getlist('productioncneter')
AvalStartDate = request.POST.getlist('AvalStartDate')
StartDate = request.POST.getlist('StartDate')
NoOfMCByResAval = request.POST.getlist('NoOfMCByResAval')
AvalCAPByDay = request.POST.getlist('AvalCAPByDay')
reqdcapunit = request.POST.getlist('reqdcapunit')
ReqdCAPByDay = request.POST.getlist('ReqdCAPByDay')
AvalMcHrOrDay = request.POST.getlist('AvalMcHrOrDay')
ReqdMcHrByUnit = request.POST.getlist('ReqdMcHrByUnit')
ReqdMcHour = request.POST.getlist('ReqdMcHour')
AvalStartTime = request.POST.getlist('AvalStartTime')
StartTime = request.POST.getlist('StartTime')
EndTime = request.POST.getlist('EndTime')
EndDate = request.POST.getlist('EndDate')
NoOfMcByRes = request.POST.getlist('NoOfMcByRes')
mc_id_no = request.POST.getlist('mc_id_no')
crp_track = request.POST.getlist('crp_track')
mass = False
if len(operationSequence) != 0:
for x in range(0, len(operationSequence)):
crp = AddCRPForm({
'crp_track': CRPTrack.objects.get(id=crp_track[x]),
'operationSequence': operationSequence[x],
"productioncneter": productioncneter[x],
"reqdcapunit": reqdcapunit[x],
"ReqdMcHrByUnit": ReqdMcHrByUnit[x],
"AvalStartDate": AvalStartDate[x],
'AvalStartTime': AvalStartTime[x],
"NoOfMCByResAval": NoOfMCByResAval[x],
"AvalCAPByDay": AvalCAPByDay[x],
'AvalMcHrOrDay': AvalMcHrOrDay[x],
'StartDate': StartDate[x],
'ReqdCAPByDay': ReqdCAPByDay[x],
'ReqdMcHour': ReqdMcHour[x],
'StartTime': StartTime[x],
'EndTime': EndTime[x],
'EndDate': EndDate[x],
'NoOfMcByRes': NoOfMcByRes[x],
'mc_id_no': mc_id_no[x]
})
if crp.is_valid():
crp = crp.save()
crp_tracks = CRPTrack.objects.get(id=crp.crp_track)
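                        # remaining daily capacity at this centre after allocating
                        # the current operation; when it reaches zero, a row for
                        # the next day is seeded further below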
BalanceCap = int(crp.AvalCAPByDay) - int(crp.ReqdCAPByDay)
cs = CapacitySchedulingFormCRP({
'ProductCenterId': ProductCenter.objects.get(product_center_name=crp.productioncneter),
'Date': crp.AvalStartDate,
'AvalCapOrDay': crp.AvalCAPByDay,
'CapALlloctdTo': crp_tracks.finished.id,
'AlloctdCap': crp.ReqdCAPByDay,
'BalanceCap': BalanceCap,
'AvalMcOrResHour': crp.AvalMcHrOrDay,
'ReqdMcOrResHour': crp.ReqdMcHour,
'BalMcOrHour': str(float(crp.AvalMcHrOrDay) - float(crp.ReqdMcHour)),
'StartTime': crp.StartTime,
'EndTime': crp.EndTime,
'NoOfMCAlloctd': crp.NoOfMcByRes,
})
if cs.is_valid():
cs.save()
if BalanceCap == 0:
AvalStartDates = datetime.strptime(str(crp.AvalStartDate), '%Y-%m-%d')
AvalStartDates = AvalStartDates + timedelta(days=1)
pc = ProductCenter.objects.get(product_center_name=crp.productioncneter)
csl = CapacityScheduling.objects.filter(ProductCenterId=pc.id).first()
cs = CapacitySchedulingFormCRP({
'ProductCenterId': ProductCenter.objects.get(product_center_name=crp.productioncneter),
'Date': AvalStartDates,
'AvalCapOrDay': csl.AvalCapOrDay,
'CapALlloctdTo': csl.CapALlloctdTo,
'AlloctdCap': csl.AlloctdCap,
'BalanceCap': csl.BalanceCap,
'AvalMcOrResHour': csl.AvalMcOrResHour,
'ReqdMcOrResHour': csl.ReqdMcOrResHour,
'BalMcOrHour': csl.BalMcOrHour,
'StartTime': csl.StartTime,
'EndTime': csl.EndTime,
'NoOfMCAlloctd': csl.NoOfMCAlloctd,
})
if cs.is_valid():
cs.save()
mass = True
else:
mass = False
if mass:
messages.success(request, 'CRP Created Successfully.')
            return HttpResponseRedirect(reverse('productionplanning:CRP', args=[crp_track[0]]))
else:
            messages.error(request, 'CRP Creation Failed.')
return render(request, 'PP/Planning/crp/general_data_form.html', context=post_crp_props)
else:
return render(request, 'PP/Planning/crp/general_data_form.html', context=post_crp_props)
def AddCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
post_crp_props['crp_track'] = crp_track
post_crp_props['crp'] = crp
return render(request, 'PP/Planning/crp/add_crp.html', context=post_crp_props)
def ViewCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
views_crp_props['crp_track'] = crp_track
views_crp_props['crp'] = crp
return render(request, 'PP/Planning/crp/views.html', context=views_crp_props)
def UpdateCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
update_crp_props['crp_track'] = crp_track
update_crp_props['crp'] = crp
    CRPFormSet = modelformset_factory(CRP, form=UpdateCRPForm, extra=0)
    formset = CRPFormSet(queryset=crp)
    update_crp_props['formset'] = formset
    if request.POST:
        formset = CRPFormSet(request.POST, queryset=crp)
if formset.has_changed():
if formset.is_valid():
formset.save()
                messages.success(request, 'CRP Updated Successfully.')
                return HttpResponseRedirect(reverse('productionplanning:UpdateCRP', args=[crp_track.id]))
            else:
                messages.error(request, 'CRP Form is not valid.')
                return HttpResponseRedirect(reverse('productionplanning:UpdateCRP', args=[crp_track.id]))
        else:
            messages.error(request, 'Values Have Not Changed.')
            return HttpResponseRedirect(reverse('productionplanning:UpdateCRP', args=[crp_track.id]))
else:
return render(request, 'PP/Planning/crp/update_crp.html', context=update_crp_props)
def CopyCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
copy_crp_props['crp_track'] = crp_track
copy_crp_props['crp'] = crp
    CRPFormSet = modelformset_factory(CRP, form=UpdateCRPForm, extra=0)
    formset = CRPFormSet(queryset=crp)
copy_crp_props['formset'] = formset
if request.POST:
mass = []
crp_trackForm = CRPTrackForm(data={
'finished': crp_track.finished,
'operationlist': crp_track.operationlist,
'product_description': crp_track.product_description,
'order_ref': crp_track.order_ref,
'order_qty': crp_track.order_qty,
})
if crp_trackForm.is_valid():
crp_trackForm = crp_trackForm.save()
for x in crp:
dirs = {
'crp_track': CRPTrack.objects.get(id=crp_trackForm.id),
'operationSequence': x.operationSequence,
'productioncneter': x.productioncneter,
'AvalStartDate': x.AvalStartDate,
'StartDate': x.StartDate,
'reqdcapunit': x.reqdcapunit,
'ReqdMcHrByUnit': x.ReqdMcHrByUnit,
'AvalStartTime': x.AvalStartTime,
'AvalMcHrOrDay': x.AvalMcHrOrDay,
'NoOfMCByResAval': x.NoOfMCByResAval,
'AvalCAPByDay': x.AvalCAPByDay,
'ReqdCAPByDay': x.ReqdCAPByDay,
'ReqdMcHour': x.ReqdMcHour,
'StartTime': x.StartTime,
'EndTime': x.EndTime,
'EndDate': x.EndDate,
'NoOfMcByRes': x.NoOfMcByRes,
'mc_id_no': x.mc_id_no,
}
crp_form = UpdateCRPForm(data=dirs)
                print(2, crp_form.is_valid())
if crp_form.is_valid():
crp_form = crp_form.save(commit=False)
crp_form.crp_track = CRPTrack.objects.get(id=crp_trackForm.id)
crp_form.save()
mass.append(True)
else:
mass.append(False)
if len(mass) == mass.count(True):
                messages.success(request, '{} Copied to {} Successfully.'.format(id, crp_trackForm.id))
return redirect('/productionplanning/crp/')
else:
                messages.error(request, 'Copy Failed.')
return redirect('/productionplanning/crp/')
else:
messages.error(request, "CRP Form isn't Valid.")
return redirect('/productionplanning/crp/')
else:
return render(request, 'PP/Planning/crp/update_crp.html', context=copy_crp_props)
def DeleteCRP(request, id):
crp = CRPTrack.objects.get(id=id)
crp.delete()
    messages.error(request, '{} Deleted Successfully.'.format(id))
return redirect('/productionplanning/crp/')
|
nazmul53p/ERP
|
productionplanning/views/CRPViews.py
|
CRPViews.py
|
py
| 14,011 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "models.CRP.CRPTrack.objects.all",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "filters.CRPFilter.CRPFilter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "filters.CRPFilter.CRPFilter",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.all",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "models.Finished.Finished.objects.get",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "models.Finished.Finished.objects",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "models.Finished.Finished",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "models.OperationList.OperationList.objects.filter",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "models.OperationList.OperationList.objects",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "models.OperationList.OperationList",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "models.OperationSequence.OperationSequence.objects.filter",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.OperationSequence.OperationSequence.objects",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "models.OperationSequence.OperationSequence",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects.get",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "models.ProductCenter.ProductCenter",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "models.ProductCenter.CapacityScheduling.objects.filter",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.CapacityScheduling.objects",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "models.ProductCenter.CapacityScheduling",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "forms.CRPForm.CRPTrackForm",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.filter",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "forms.CRPForm.AddCRPForm",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "forms.ProductCenterForm.CapacitySchedulingFormCRP",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects.get",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "models.ProductCenter.ProductCenter",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects.get",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "models.ProductCenter.ProductCenter",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "models.ProductCenter.CapacityScheduling.objects.filter",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.CapacityScheduling.objects",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "models.ProductCenter.CapacityScheduling",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "forms.ProductCenterForm.CapacitySchedulingFormCRP",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects.get",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "models.ProductCenter.ProductCenter.objects",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "models.ProductCenter.ProductCenter",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "models.CRP.CRP.objects.filter",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRP.objects",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRP",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "models.CRP.CRP.objects.filter",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRP.objects",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRP",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "models.CRP.CRP.objects.filter",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRP.objects",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRP",
"line_number": 235,
"usage_type": "name"
},
{
"api_name": "django.forms.modelformset_factory",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRP",
"line_number": 238,
"usage_type": "argument"
},
{
"api_name": "forms.CRPForm.UpdateCRPForm",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "models.CRP.CRP.objects.filter",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRP.objects",
"line_number": 260,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRP",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "django.forms.modelformset_factory",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRP",
"line_number": 263,
"usage_type": "argument"
},
{
"api_name": "forms.CRPForm.UpdateCRPForm",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "forms.CRPForm.CRPTrackForm",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "forms.CRPForm.UpdateCRPForm",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects.get",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "models.CRP.CRPTrack.objects",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "models.CRP.CRPTrack",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 325,
"usage_type": "call"
}
] |
18026129984
|
import requests
from bs4 import BeautifulSoup
import csv
from os import getcwd, path
print('\n'*3)
print(" /~________~\ .----------. (| L0L Pull |) '----------' \_~~~~~~~~_/ ")
print('\n'*3)
print(" .: Author- Jean Paul :. ")
print('\n'*3)
current_directory=getcwd()
print(" Your Current Working Directory is : \n",current_directory)
outpath = path.join(current_directory, "l0lB@S_Output.csv")
myFile = open(outpath, 'a+',newline='')
csv_out = csv.writer(myFile)
csv_out.writerow(["Name","BInary_Link","Function","Code(s)"])
url="https://lolbas-project.github.io"
r=requests.get(url)
print('_'*60)
print("Connected to LOLBAS: ",url)
print('\n'*3)
soup = BeautifulSoup(r.content, "html.parser")
for tag in soup.find_all('a','bin-name'):
hrefs=tag['href'].replace("//","")
urls=url+hrefs
for ur in [urls]:
nr=requests.get(ur)
sups=BeautifulSoup(nr.content, "html.parser")
print('_'*60)
funclist=[[t.text for t in f.find_all('a')] for f in sups.find_all('ul','function-list')]
code=[i.text for i in sups.find_all('code')]
print("Working on binary",tag.text," at location ",ur," ::::Contains ",funclist)
csv_out.writerow([tag.text,urls,funclist,code])
|
PurpleS3Cf0X/Pull_LOLBAS
|
Pull_L0LBAS_Binaries.py
|
Pull_L0LBAS_Binaries.py
|
py
| 1,292 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
}
] |
2266571308
|
import os
import fnmatch
import datetime
from lxml import etree
from openpyxl import Workbook
from itertools import count
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
def save_test_result_to_excel_file():
tc_name_list = []
tc_status_list = []
tc_error_list = []
tc_tag_list = []
xml_files = get_all_xml_files()
for each_xml_file in xml_files:
context = etree.iterparse(each_xml_file, events=('end',))
for event, elem in context:
if elem.tag == 'test':
tc_name_attrib = elem.attrib['name']
tc_name_list.append(tc_name_attrib)
tc_status_list.append(get_test_status_path(
elem, tc_name_attrib)[0].attrib['status'])
tc_error_list.append(get_test_status_path(elem, tc_name_attrib)[0].text)
tc_tag_list.append(get_test_tags(elem, tc_name_attrib))
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
del context
date_stamp = '{:%Y-%m-%d-%H%M%S}'.format(datetime.datetime.now())
result_file = 'test_result-' + date_stamp + '.xlsx'
wb = Workbook()
ws = wb.worksheets[0]
ws['A1'] = 'Test Case'
ws['B1'] = 'Status'
ws['C1'] = 'Error Details'
ws['D1'] = 'Tags'
ws['E1'] = 'Comments'
for i, name, status, error, tags in izip(count(), tc_name_list, tc_status_list, tc_error_list, tc_tag_list):
ws['A' + str(i + 2)] = name
ws['B' + str(i + 2)] = status
ws['C' + str(i + 2)] = error
ws['D' + str(i + 2)] = ', '.join(tags)
ws.auto_filter.ref = 'A1:E1'
wb.save(filename=result_file)
def get_test_status_path(elem, tc_name_attrib):
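    # Robot Framework test names may contain single quotes, which would break a
    # single-quoted XPath literal, so pick the quoting style accordingly.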
if "'" in tc_name_attrib:
tc_name_quoted = '"%s"' % tc_name_attrib
return elem.xpath("//test[@name=" + tc_name_quoted + "]/status")
else:
return elem.xpath("//test[@name='" + tc_name_attrib + "']/status")
def get_test_tags(elem, tc_name_attrib):
if "'" in tc_name_attrib:
tc_name_quoted = '"%s"' % tc_name_attrib
return elem.xpath("//test[@name=" + tc_name_quoted + "]/tags/tag/text()")
else:
return elem.xpath("//test[@name='" + tc_name_attrib + "']/tags/tag/text()")
def get_all_xml_files():
xml_files = []
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*.xml'):
xml_files.append(os.path.join(root, filename))
return xml_files
if __name__ == '__main__':
save_test_result_to_excel_file()
|
jsonpanganiban/robotframework-result-extractor
|
result_extractor_excel.py
|
result_extractor_excel.py
|
py
| 2,706 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "itertools.izip",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "lxml.etree.iterparse",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "itertools.izip",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "fnmatch.filter",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
}
] |
12793192330
|
import json
import random
for i in range(20):
num = random.randint(0, 7)
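    # num is uniform over 0..7, so the "Not Cow" trait (num 6 or 7) occurs with
    # probability 2/8 = 25%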
cow = "Cow"
if num == 6 or num == 7:
cow = "Not Cow"
data = {"name": "Cowpoly #{0}".format(i),
"tokenID": i, "description": "Cowpoly is an experimental NFT collection created for educational purpose only. Credits for all artwork go to original creators.",
"image": "https://github.com/eddiekaung/cowpolyNFT/raw/main/images/{0}.jpg".format(num),
"attributes": [
{"trait_type": "Type", "value": cow}
]}
with open('{0}.json'.format(i), 'w') as f:
json.dump(data, f)
|
eddiekaung/cowpolyNFT
|
metadata/script.py
|
script.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 17,
"usage_type": "call"
}
] |
10424727921
|
#-*- coding: utf-8 -*-
"""
@author: Jordi Fernández
@contact: [email protected]
@organization: Whads/Accent SL
@since: September 2010
"""
import cherrypy
from cocktail.events import when
from cocktail import schema
from cocktail.translations import translations
from cocktail.controllers import context
from woost.models import Extension, Document, Template, User
from woost.models.permission import DeletePermission, ModifyPermission
translations.define("MailerExtension",
ca = u"Enviament de documents per correu electrònic",
es = u"Envio de documentos por correo electrónico",
en = u"Sending documents by email"
)
translations.define("MailerExtension-plural",
ca = u"Enviament de documents per correu electrònic",
es = u"Envio de documentos por correo electrónico",
en = u"Sending documents by email"
)
class MailerExtension(Extension):
def __init__(self, **values):
Extension.__init__(self, **values)
self.extension_author = u"Whads/Accent SL"
self.set("description",
u"""Permet enviar documents per correu electrònic.""",
"ca"
)
self.set("description",
u"""Permite enviar documents por correo electrónico.""",
"es"
)
self.set("description",
u"""Allows send documents by email.""",
"en"
)
def _load(self):
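        # imports are deferred until the extension is loaded, presumably to
        # avoid circular imports during startup (assumption; the original gives
        # no rationale)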
from woost.controllers.notifications import notify_user
from woost.controllers.backoffice.basebackofficecontroller import \
BaseBackOfficeController
from woost.controllers.backoffice.itemcontroller import \
ItemController
from woost.extensions.mailer import (
sendemailaction,
createmailingaction,
strings
)
from woost.extensions.mailer.mailing import Mailing, \
RunningMailingError
from woost.extensions.mailer.sendemailcontroller import \
SendEmailController
ItemController.send_email = SendEmailController
Template.add_member(
schema.Boolean(
"per_user_customizable",
default = False,
listed_by_default = False
)
)
Template.members_order.append("per_user_customizable")
User.add_member(
schema.Collection(
"mailingLists",
items = "woost.extensions.mailer.mailinglist.MailingList",
bidirectional = True,
listed_by_default = False
)
)
@when(BaseBackOfficeController.exception_raised)
def handle_exception_raised(event):
if isinstance(
event.exception,
RunningMailingError
):
notify_user(translations(event.exception), "error")
raise cherrypy.HTTPRedirect(event.source.contextual_uri())
# Disable interactive features from rendered pages when rendering
# static content
from woost.controllers.cmscontroller import CMSController
@when(CMSController.producing_output)
def disable_user_controls(event):
if context.get("sending_email", False):
event.output["show_user_controls"] = False
|
marticongost/woost
|
woost/extensions/mailer/__init__.py
|
__init__.py
|
py
| 3,502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cocktail.translations.translations.define",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cocktail.translations.translations",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "cocktail.translations.translations.define",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cocktail.translations.translations",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "woost.models.Extension",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "woost.models.Extension.__init__",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "woost.models.Extension",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "woost.controllers.backoffice.itemcontroller.ItemController.send_email",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "woost.controllers.backoffice.itemcontroller.ItemController",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "woost.extensions.mailer.sendemailcontroller.SendEmailController",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "woost.models.Template.add_member",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "woost.models.Template",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "cocktail.schema.Boolean",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cocktail.schema",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "woost.models.Template.members_order.append",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "woost.models.Template.members_order",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "woost.models.Template",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "woost.models.User.add_member",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "woost.models.User",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "cocktail.schema.Collection",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cocktail.schema",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "woost.extensions.mailer.mailing.RunningMailingError",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "woost.controllers.notifications.notify_user",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cocktail.translations.translations",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cherrypy.HTTPRedirect",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "cocktail.events.when",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "woost.controllers.backoffice.basebackofficecontroller.BaseBackOfficeController.exception_raised",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "woost.controllers.backoffice.basebackofficecontroller.BaseBackOfficeController",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "cocktail.controllers.context.get",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "cocktail.controllers.context",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "cocktail.events.when",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "woost.controllers.cmscontroller.CMSController.producing_output",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "woost.controllers.cmscontroller.CMSController",
"line_number": 100,
"usage_type": "name"
}
] |
19624665111
|
# -*- coding: utf-8 -*-
import math
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.contrib import messages
# Create your views here.
from django.urls import reverse_lazy
from create_profile.forms import CreateFormsB
from create_profile.models import CreateNeighbour, CreateBusiness, LikeModel, LikeBusinesModel, AllCities#, \
#Profession # , LikeEndModelNe
def index(request):
    # Home page
if request.user.is_authenticated:
        # on login, show messages to users who have mutual neighbour likes
all_likes = LikeModel.objects.filter(like_second=request.user).values('like_first')
for all_likes_s in all_likes:
# print('all_likes-Neighb')
# print(all_likes)
step_one = []
step_two = []
if len(all_likes) > 0:
            # print('step 1 found')
step_one = all_likes_s
            # step 2
all_likes_two = LikeModel.objects.filter(like_first=request.user).values('like_second')
if len(all_likes_two) > 0:
                # print('step 2 found')
# print(all_likes_two)
step_two = all_likes_two
# ii = int(0)
# print('step_one :', step_one)
# print('step_two :', step_two)
for step_two_s in step_two:
if step_two_s['like_second'] == step_one['like_first']:
                    messages.success(request, 'You were liked by user: ' + step_two_s['like_second'])
email_second = LikeModel.objects.filter(like_second=step_two_s['like_second']).values('email_second')
                    messages.success(request, ' Write to them at: ' + email_second[0]['email_second'])
all_likes_b = LikeBusinesModel.objects.filter(like_second=request.user).values()
for all_likes_s_b in all_likes_b:
step_one_b = []
step_two_b = []
            if len(all_likes_b) > 0:
                step_one_b = all_likes_s_b
all_likes_b_two = LikeBusinesModel.objects.filter(like_first=request.user).values('like_second')
if len(all_likes_b_two) > 0:
step_two_b = all_likes_b_two
for all_likes_s_b_s in step_two_b:
if all_likes_s_b_s['like_second'] == step_one_b['like_first']:
                    messages.success(request, 'You were liked by business user: ' + all_likes_s_b_s['like_second'])
email_second_b = LikeBusinesModel.objects.filter(like_second=all_likes_s_b_s['like_second']).values('email_second')
                    messages.success(request, 'Contact email: ' + email_second_b[0]['email_second'])
return render(request, 'index.html',)
else:
return render(request, 'index.html', )
@login_required(login_url=reverse_lazy('login'))
def all_filter_search(request):
    # list all neighbour profiles
list_tmp_13 = 0
allCities = AllCities.objects.all()
catalog_filter = CreateNeighbour.objects.values_list('id',
'name',
'gender',
'gender_neighb',
# 'sel_city',
# 'sel_distr',
'presence_animals',
'presence_flat',
'attitude_animals',
'attitude_smok',
'about_me',
'image',
'user_neig',
'cities',
'regin',)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
    # build the array that the template will render
for all_goods_s in catalog_filter:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
p = CreateNeighbour(gender=tmp_s[2])
gender = p.get_gender_display()
p = CreateNeighbour(gender_neighb=tmp_s[3])
gender_neighb = p.get_gender_neighb_display()
# p = CreateNeighbour(sel_city=tmp_s[4])
# sel_city = p.get_sel_city_display()
#
# p = CreateNeighbour(sel_distr=tmp_s[5])
# sel_distr = p.get_sel_distr_display()
p = CreateNeighbour(attitude_animals=tmp_s[6])
attitude_animals = p.get_attitude_animals_display()
p = CreateNeighbour(attitude_smok=tmp_s[7])
attitude_smok = p.get_attitude_smok_display()
cities_name = AllCities.objects.filter(name=tmp_s[11]).values('name')
distr_name = AllCities.objects.filter(name=tmp_s[12]).values('name')
# cities_b = p.get_cities_b_display()
# print('allCities_name')
#
# print(cities_name[0]['name'])
# print(distr_name[0]['name'])
list_tmp = list(tmp_s)
list_tmp[2] = gender
list_tmp[3] = gender_neighb
# list_tmp[4] = sel_city
# list_tmp[5] = sel_distr
list_tmp[6] = attitude_animals
list_tmp[7] = attitude_smok
list_tmp[11] = cities_name[0]['name']
list_tmp[12] = distr_name[0]['name']
        user_like = LikeModel.objects.filter(first_id=tmp_s[10]).filter(second_id=request.user.id).values_list('id')  # likes shown in the template
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all profiles
# print('allCities')
# print(allCities)
return render(request, 'all_neighb.html', {'serxh_filer': tmp_categ_name, 'allCities': allCities,})
@login_required(login_url=reverse_lazy('login'))
def all_busines(request):
    # all businesses
    # build the array with all business profiles
distinr = CreateBusiness.objects.order_by('category_bus').values('category_bus').distinct()
print('distin254')
print(distinr)
allCities = AllCities.objects.all()
form = CreateFormsB()
search_filer = CreateBusiness.objects.values_list('name_bus',
# 'sel_city',
# 'sel_distr',
'category_bus',
'about_me',
'image',
'user_bus',
'id',
'cities',
'regin',)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
for all_goods_s in search_filer:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
# p = CreateBusiness(sel_city=tmp_s[1])
# sel_city = p.get_sel_city_display()
#
# p = CreateBusiness(sel_distr=tmp_s[2])
# sel_distr = p.get_sel_distr_display()
# cities_name_b = AllCities.objects.filter(id=tmp_s[8]).values('name')
# distr_name_b = AllCities.objects.filter(id=tmp_s[9]).values('name')
# search_filer_pro = Profession.objects.filter(id=tmp_s[10]).values('name')
# cities_b = p.get_cities_b_display()
# print('search_filer_pro')
# print(search_filer_pro[0]['name'])
list_tmp = list(tmp_s)
# list_tmp[1] = sel_city
# list_tmp[2] = sel_distr
# list_tmp[8] = cities_name_b[0]['name']
# list_tmp[9] = distr_name_b[0]['name']
# list_tmp[10] = search_filer_pro[0]['name']
# list_tmp[11] = search_filer_pro[0]['name']
        # likes
user_like = LikeBusinesModel.objects.filter(first_id=tmp_s[4]).filter(second_id=request.user.id).values_list('id')
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
# AllCities.cities_b.all()
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all business profiles
return render(request, 'all_busines.html', {'serxh_filer': tmp_categ_name, 'allCities': allCities, 'form': form, 'distinr': distinr})
def likes(request):
    # likes: record the liker and the profile owner in the table
test_user = User.objects.filter(id=request.POST.get('first')).values('username', 'email')
nearly_final = LikeModel(like_first=test_user[0]['username'], email_first=test_user[0]['email'],
like_second=request.POST.get('second'), email_second=request.POST.get('email'),
first_id=request.POST.get('first'), second_id=request.POST.get('second_id'),
id_ancket=request.POST.get('id_ancket'),)
nearly_final.save(force_insert=True)
return redirect('/all_neighb/')
def likes_busines(request):
    # likes: record the liker and the profile owner in the table
test_user = User.objects.filter(id=request.POST.get('first')).values('username', 'email')
nearly_final = LikeBusinesModel(like_first=test_user[0]['username'], email_first=test_user[0]['email'],
like_second=request.POST.get('second'), email_second=request.POST.get('email'),
first_id=request.POST.get('first'), second_id=request.POST.get('second_id'),
id_ancket=request.POST.get('id_ancket'),)
nearly_final.save(force_insert=True)
return redirect('/all_busines/')
def statistics(request):
    # statistics
count_us = User.objects.values('username', 'email')
count_users = (len(count_us))
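    # each mutual like presumably yields two rows, hence halving and rounding up below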
all_likes = LikeModel.objects.values()
count_stat = len(all_likes)/2
all_likes_n = math.ceil(count_stat)
all_likes_b = LikeBusinesModel.objects.values()
count_stat = len(all_likes_b)/2
all_likes_bus = math.ceil(count_stat)
# for count_us_s in count_us:
print(count_us[0]['username'])
    tmp = count_us[0]['username']
all_likes_count = LikeModel.objects.filter(like_second=count_us[0]['username']).count()
all_likes_count_two = LikeModel.objects.filter(like_first=count_us[0]['username']).count()
if all_likes_count > 0 and all_likes_count_two > 0:
# print('tmp')
# print(tmp)
all_likes_count_two_two = LikeModel.objects.filter(like_first=count_us[0]['username']).values()
# print('all_likes_count_two_two ')
# print(all_likes_count_two_two[0]['like_first'])
# save_end_like = LikeEndModelNe(like_first=all_likes_count_two_two[0]['like_first'],
# email_first=all_likes_count_two_two[0]['email_first'],
# like_second=all_likes_count_two_two[0]['like_second'],
# email_second=all_likes_count_two_two[0]['email_second'],)
# print('save_end_like')
# print(save_end_like)
# save_end_like.save()
# print(all_likes_count, all_likes_count_two)
return render(request, 'statistics.html', {'count_users':count_users, 'all_likes_n': all_likes_n, 'all_likes_bus': all_likes_bus})
def tree_form(request):
    # get the districts for a given city id
dict1 = {}
# print(request.GET.get('id'))
city_id = request.GET.get('id')
all_child = AllCities.objects.filter(parent_id=city_id).values('name', 'id')
# print('all_child')
# print(all_child)
    dict1['data'] = list(all_child)  # dict inside a dict, for the JSON response
# print('dict1')
# print(dict1)
return JsonResponse(dict1)
def tree_form_b(request):
dict2 = {}
print(request.GET.get('id'))
city_id = request.GET.get('id')
all_child = AllCities.objects.filter(parent_id=city_id).values('name', 'id')
print('all_child_b-450')
print(all_child)
# return HttpResponse('1')
    dict2['data'] = list(all_child)
print('dict1_b-456')
print(dict2)
# return JsonResponse({'all_child': all_child, 'status': 'ok', 'dict1': dict1})
return JsonResponse(dict2)
# return HttpResponse({'all_child': all_child,})
def search_fetch(request):
    # get profiles by city id, district id, and neighbour gender
print('distr-461')
print(request.GET.get('id'))
print('seacrprof464')
print(request.GET.get('distr'))
print(request.GET)
cities_name = AllCities.objects.filter(id=request.GET.get('id')).values('name')
distr_name = AllCities.objects.filter(id=request.GET.get('distr')).values('name')
catalog_filter=CreateNeighbour.objects.filter(cities=cities_name[0]['name'],
regin=distr_name[0]['name'],
gender=request.GET.get('ender')).\
values_list('id',
'name',
'gender',
'gender_neighb',
# 'sel_city',
# 'sel_distr',
'presence_animals',
'presence_flat',
'attitude_animals',
'attitude_smok',
'about_me',
'image',
'user_neig',
'cities',
'regin',)
print('catalog_filter')
print(catalog_filter)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
    # build the array that the template will render
for all_goods_s in catalog_filter:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
p = CreateNeighbour(gender=tmp_s[2])
gender = p.get_gender_display()
p = CreateNeighbour(gender_neighb=tmp_s[3])
gender_neighb = p.get_gender_neighb_display()
# p = CreateNeighbour(sel_city=tmp_s[4])
# sel_city = p.get_sel_city_display()
#
# p = CreateNeighbour(sel_distr=tmp_s[5])
# sel_distr = p.get_sel_distr_display()
        p = CreateNeighbour(attitude_animals=tmp_s[6])  # indices 6/7 match attitude_animals/attitude_smok in the values_list above
        attitude_animals = p.get_attitude_animals_display()
        p = CreateNeighbour(attitude_smok=tmp_s[7])
        attitude_smok = p.get_attitude_smok_display()
# cities_name = AllCities.objects.filter(id=tmp_s[11]).values('name')
# distr_name = AllCities.objects.filter(id=tmp_s[12]).values('name')
list_tmp = list(tmp_s)
list_tmp[2] = gender
list_tmp[3] = gender_neighb
# list_tmp[4] = sel_city
# list_tmp[5] = sel_distr
        list_tmp[6] = attitude_animals
        list_tmp[7] = attitude_smok
list_tmp[11] = cities_name[0]['name']
list_tmp[12] = distr_name[0]['name']
user_like = LikeModel.objects.filter(first_id=tmp_s[10]).filter(second_id=request.user.id).values_list('id') # лайки в шаблоне
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all profiles
print('allCities')
print(tmp_categ_name)
return render(request, 'filtr_neig.html', {'filter_ne':tmp_categ_name})
# return JsonResponse(dict1)
def search_fetch_b(request):
# dict1 = {}
# filter_ne= CreateNeighbour.objects.filter(cities_id=request.GET.get('id')).values()
# print('filter_ne')
# print(filter_ne)
print('distr-513')
print(request.GET.get('id'))
print('seacrprof546')
print(request.GET.get('distr'))
print('seacrprof-548')
print(request.GET.get('prof'))
# dict1['data']=list(filter_ne)
allCities = AllCities.objects.all()
# catalog_filter = CreateNeighbour.objects. \
# catalog_filter=CreateNeighbour.objects.filter(cities_id=request.GET.get('id'),districts_tree=request.GET.get('distr'),gender=request.GET.get('ender')). \, category_bus=request.GET.get('prof'))\
cities_name = AllCities.objects.filter(id=request.GET.get('id')).values('name')
distr_name = AllCities.objects.filter(id=request.GET.get('distr')).values('name')
search_filer = CreateBusiness.objects.filter(cities=cities_name[0]['name'],
regin=distr_name[0]['name'],
category_bus=request.GET.get('prof')).values_list('name_bus',
# 'sel_city',
# 'sel_distr',
'category_bus',
'about_me',
'image',
'user_bus',
'id',
'cities',
'regin',)
print('search_filer-561')
print(search_filer)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
for all_goods_s in search_filer:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
# p = CreateBusiness(sel_city=tmp_s[1])
# sel_city = p.get_sel_city_display()
#
# p = CreateBusiness(sel_distr=tmp_s[2])
# sel_distr = p.get_sel_distr_display()
list_tmp = list(tmp_s)
# list_tmp[1] = sel_city
# list_tmp[2] = sel_distr
user_like = LikeBusinesModel.objects.filter(first_id=tmp_s[4]).filter(second_id=request.user.id).values_list('id')
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all business profiles
print('tmp_categ_name-598')
print(tmp_categ_name)
return render(request, 'test_busines.html', {'serxh_filer': tmp_categ_name})
# return JsonResponse(dict1)
|
drhtka/prifile
|
main/views.py
|
views.py
|
py
| 21,683 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects.filter",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects.filter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects.filter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects.all",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "create_profile.models.CreateNeighbour.objects.values_list",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour.objects",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateBusiness.objects.order_by",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateBusiness.objects",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.CreateBusiness",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "create_profile.models.AllCities.objects.all",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "create_profile.forms.CreateFormsB",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateBusiness.objects.values_list",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateBusiness.objects",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.CreateBusiness",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects.filter",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.values",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel.objects.values",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects.values",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "create_profile.models.CreateNeighbour.objects.filter",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour.objects",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateNeighbour",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects.filter",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeModel.objects",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeModel",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects.all",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "create_profile.models.AllCities.objects.filter",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "create_profile.models.AllCities.objects",
"line_number": 411,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.AllCities",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "create_profile.models.CreateBusiness.objects.filter",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "create_profile.models.CreateBusiness.objects",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.CreateBusiness",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects.filter",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "create_profile.models.LikeBusinesModel.objects",
"line_number": 443,
"usage_type": "attribute"
},
{
"api_name": "create_profile.models.LikeBusinesModel",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 456,
"usage_type": "call"
}
] |
2687378702
|
import numpy as np
import random as rnd
import matplotlib.pyplot as plt
CORDS = []
avaibleNodes = []
border = 30
x0y0 = [0,0]
CORDS.append(x0y0)
avaibleNodes.append([x0y0[0]+1,x0y0[1]])
avaibleNodes.append([x0y0[0],x0y0[1]+1])
avaibleNodes.append([x0y0[0]-1,x0y0[1]])
avaibleNodes.append([x0y0[0],x0y0[1]-1])
def AddMass():
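    # attach one randomly chosen frontier node to the cluster (CORDS), then refresh the frontier (avaibleNodes)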
global CORDS, avaibleNodes
tempCords = []
NavaibleNodes = len(avaibleNodes)
rand = rnd.randint(0,NavaibleNodes-1)
tempCords = avaibleNodes[rand]
CORDS.append(tempCords)
avaibleNodes.remove(tempCords)
newNodes = []
UpdateANodes(tempCords)
    for node in avaibleNodes[:]:  # iterate over a copy: removing from a list while iterating over it skips elements
        if node in CORDS:
            avaibleNodes.remove(node)
def UpdateANodes(point):
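    # add the four neighbours of the new point to the frontier, skipping nodes already occupied or already queued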
global avaibleNodes, CORDS
newNodes = []
newNodes.append([point[0]+1,point[1]])
newNodes.append([point[0],point[1]+1])
newNodes.append([point[0]-1,point[1]])
newNodes.append([point[0],point[1]-1])
    # filter instead of remove-while-iterating (the original pattern skips elements)
    newNodes = [n for n in newNodes if n not in CORDS and n not in avaibleNodes]
for newNode in newNodes:
avaibleNodes.append(newNode)
counter = 0
while True:
counter +=1
AddMass()
if CORDS[-1][0] >= border or CORDS[-1][1] >= border or CORDS[-1][0] <= -border or CORDS[-1][1] <= -border:
break
CordsX = []
CordsY = []
aNodesX = []
aNodesY = []
for cord in CORDS:
CordsX.append(cord[0])
CordsY.append(cord[1])
for node in avaibleNodes:
aNodesX.append(node[0])
aNodesY.append(node[1])
COLORS = [
(1,0,0),
(1,127/255,0),
(1,1,0),
(0,1,0),
(0,0,1),
(75/255,0,130/255),
(148/255,0,211/255)
]
plt.rcParams['axes.facecolor']='black'
for i in range(7):
plt.plot(CordsX[i*int(counter/7):(i+1)*int(counter/7)],CordsY[i*int(counter/7):(i+1)*int(counter/7)],'.',color=COLORS[i])
plt.plot(aNodesX,aNodesY,'.',color='white')
plt.show()
|
filipmalecki94/Computer_modeling
|
lista4/zadanie1.py
|
zadanie1.py
|
py
| 1,915 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
}
] |
43418038023
|
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
img= cv.imread("cat/cat.png")
#cv.imshow("cat",img)
def rescale(frame, scale=0.5):
width = int(frame.shape[1]*scale)
height = int(frame.shape[0]*scale)
dims=(width,height)
return cv.resize(frame, dims, interpolation=cv.INTER_AREA)
resize_image=rescale(img)
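# SURF is patented: cv.xfeatures2d.SURF_create is only available in opencv-contrib builds
# compiled with the nonfree modules enabled, and may raise cv.error otherwise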
surf = cv.xfeatures2d.SURF_create(800)
cv.imshow('Image',resize_image)
cv.waitKey(0)
|
aishanii/Python
|
main.py
|
main.py
|
py
| 444 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.xfeatures2d.SURF_create",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 16,
"usage_type": "call"
}
] |
73772810746
|
from typing import List
LAND = '1'
WATER = '0'
# TODO: Review superior solutions
def overlaps(min1, max1, min2, max2):
overlap = max(0, min(max1, max2) - max(min1, min2))
if overlap > 0:
return True
if min1 == min2 or min1 == max2 or max1 == min2 or max1 == max2:
return True
if (min1 > min2 and max1 < max2) or (min2 > min1 and max2 < max1):
return True
return False
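# quick sanity check: the zero-width interval [1, 1] lies inside [0, 2], so this should print True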
print(overlaps(0, 2, 1, 1))
# Definition for a Bucket: a union-find style container; combined buckets share a common destination.
class Bucket:
def __init__(self, identifiers: List[int]):
self.destination = None
self.identifiers = set(identifiers)
def hasDestination(self) -> bool:
        return self.destination is not None
def getDestination(self):
if not self.hasDestination():
return self
return self.destination.getDestination()
def combine(self, bucket):
otherDestination = bucket.getDestination()
thisDestination = self.getDestination()
uniqueIdentifiers = otherDestination.identifiers | thisDestination.identifiers
newBucket = Bucket(uniqueIdentifiers)
otherDestination.destination = newBucket
thisDestination.destination = newBucket
return newBucket
def contains(self, identifier: int) -> bool:
return identifier in self.getDestination().identifiers
class Solution:
'''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands.
An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.
You may assume all four edges of the grid are all surrounded by water.
'''
def numIslands(self, grid: List[List[str]]) -> int:
if len(grid) < 1:
return 0
nextRowIsland = 1
rowIslands = {}
currentRowIslandStart = None
'''
Here we are generating row islands that we will then be pairing with adjacent row islands to form
groups that we will then combine into the true islands that are needed to get the correct answer
'''
for rowIndex, row in enumerate(grid):
lastSpot = WATER
lengthOfRow = len(row)
rowIslands[rowIndex] = []
for spotIndex, spot in enumerate(row):
if lastSpot == WATER and spot == LAND:
currentRowIslandStart = spotIndex
if spotIndex + 1 >= lengthOfRow and spot == LAND:
rowIslands[rowIndex].append((nextRowIsland, currentRowIslandStart, spotIndex))
nextRowIsland += 1
currentRowIslandStart = None
                elif spot == WATER and currentRowIslandStart is not None:
rowIslands[rowIndex].append((nextRowIsland, currentRowIslandStart, spotIndex - 1))
nextRowIsland += 1
if spot == WATER:
currentRowIslandStart = None
lastSpot = spot
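        # pair each row island with overlapping islands in the previous row, propagating group numbers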
nextGroup = 1
maxRowIndex = len(grid)
rowIslandsToGroups = {}
for rowNumber in [rowNumber for rowNumber in range(maxRowIndex)]:
for rowIslandNumber, startIndex, endIndex in rowIslands[rowNumber]:
rowIslandsToGroups[rowIslandNumber] = []
if rowNumber == 0:
rowIslandsToGroups[rowIslandNumber].append(nextGroup)
nextGroup += 1
continue
for prevRowIslandNumber, prevStartIndex, prevEndIndex in rowIslands[rowNumber - 1]:
if overlaps(prevStartIndex, prevEndIndex, startIndex, endIndex):
for groupNumber in rowIslandsToGroups[prevRowIslandNumber]:
rowIslandsToGroups[rowIslandNumber].append(groupNumber)
if len(rowIslandsToGroups[rowIslandNumber]) == 0:
rowIslandsToGroups[rowIslandNumber].append(nextGroup)
nextGroup += 1
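        # union the groups that share a row island; buckets left without a destination are the distinct islands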
groupBuckets = {}
allBuckets = []
for rowIslandNumber in range(1, nextRowIsland):
relatedGroups = rowIslandsToGroups[rowIslandNumber]
for group in relatedGroups:
                if groupBuckets.get(group) is None:
newGroupBucket = Bucket([group])
groupBuckets[group] = newGroupBucket
allBuckets.append(newGroupBucket)
relatedBuckets = [groupBuckets[group] for group in relatedGroups]
firstBucket = relatedBuckets[0]
for group in relatedGroups:
if not firstBucket.contains(group):
newCombinedBucket = firstBucket.combine(groupBuckets[group])
allBuckets.append(newCombinedBucket)
return len([resultBucket for resultBucket in allBuckets if not resultBucket.hasDestination()])
solver = Solution()
# 1
# inputGrid = [
# '11110',
# '11010',
# '11000',
# '00000',
# ]
# 3
# inputGrid = [
# '11000',
# '11000',
# '00100',
# '00011',
# ]
# 1
# inputGrid = [
# '11011',
# '10001',
# '10001',
# '11111',
# ]
# 5
# inputGrid = [
# '101',
# '010',
# '101',
# ]
# 1
inputGrid = [
'111',
'010',
'010',
]
print(solver.numIslands(inputGrid))
|
AndreiBoris/sample-problems
|
python/0200-numbers-of-islands/number-of-islands.py
|
number-of-islands.py
|
py
| 5,325 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 55,
"usage_type": "name"
}
] |
41636609732
|
from django.shortcuts import get_object_or_404, render, redirect, reverse
from django.http import HttpResponseRedirect
from .models import UserNotification
# Create your views here.
def notification_visited(request, pk):
notification = get_object_or_404(UserNotification, pk=pk)
question_id = notification.related_question.pk
notification.delete()
return HttpResponseRedirect(reverse('posts:question_page', args=(question_id,)))
def delete(request, pk):
get_object_or_404(UserNotification, pk=pk).delete()
return HttpResponseRedirect('')
|
bstecka/overboard
|
overboard/notifications/views.py
|
views.py
|
py
| 584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.UserNotification",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.UserNotification",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 17,
"usage_type": "call"
}
] |
10120459329
|
"""
Library and Wrapper for SI7021 temperature/humidity sensor.
Based on https://github.com/chrisbalmer/micropython-si7021, which implements a micropython driver.
Extensions: Added a wrapper function for getting directly the values
"""
from time import sleep
import sensors
import logging
class CRCError(Exception):
'Data failed a CRC check.'
pass
class Si7021(object):
'Driver for the Si7021 temperature sensor.'
SI7021_DEFAULT_ADDRESS = 0x40
SI7021_MEASTEMP_NOHOLD_CMD = bytearray([0xF3])
SI7021_MEASRH_NOHOLD_CMD = bytearray([0xF5])
SI7021_RESET_CMD = bytearray([0xFE])
SI7021_ID1_CMD = bytearray([0xFA, 0x0F])
SI7021_ID2_CMD = bytearray([0xFC, 0xC9])
I2C_WAIT_TIME = 0.025
def __init__(self, i2c, address=SI7021_DEFAULT_ADDRESS):
'Initialize an Si7021 sensor object.'
self.i2c = i2c
self.address = address
self.serial, self.identifier = self._get_device_info()
@property
def temperature(self):
        'Return the temperature in Celsius.'
temperature = self._get_data(self.SI7021_MEASTEMP_NOHOLD_CMD)
celcius = temperature * 175.72 / 65536 - 46.85
return celcius
@temperature.setter
def temperature(self, value):
raise AttributeError('can\'t set attribute')
@property
def relative_humidity(self):
'Return the relative humidity as a percentage. i.e. 35.59927'
relative_humidity = self._get_data(self.SI7021_MEASRH_NOHOLD_CMD)
relative_humidity = relative_humidity * 125 / 65536 - 6
return relative_humidity
@relative_humidity.setter
def relative_humidity(self, value):
raise AttributeError('can\'t set attribute')
def reset(self):
'Reset the sensor.'
self.i2c.writeto(self.address, self.SI7021_RESET_CMD)
sleep(self.I2C_WAIT_TIME)
def _get_data(self, command):
'Retrieve data from the sensor and verify it with a CRC check.'
data = bytearray(3)
self.i2c.writeto(self.address, command)
sleep(self.I2C_WAIT_TIME)
self.i2c.readfrom_into(self.address, data)
value = self._convert_to_integer(data[:2])
verified = self._verify_checksum(data)
if not verified:
raise CRCError('Data read off i2c bus failed CRC check.',
data[:2],
data[-1])
return value
def _get_device_info(self):
'''Get the serial number and the sensor identifier. The identifier is
part of the bytes returned for the serial number.
'''
# Serial 1st half
self.i2c.writeto(self.address, self.SI7021_ID1_CMD)
id1 = bytearray(8)
sleep(self.I2C_WAIT_TIME)
self.i2c.readfrom_into(self.address, id1)
# Serial 2nd half
self.i2c.writeto(self.address, self.SI7021_ID2_CMD)
id2 = bytearray(6)
sleep(self.I2C_WAIT_TIME)
self.i2c.readfrom_into(self.address, id2)
combined_id = bytearray([id1[0], id1[2], id1[4], id1[6],
id2[0], id2[1], id2[3], id2[4]])
serial = self._convert_to_integer(combined_id)
identifier = self._get_device_identifier(id2[0])
return serial, identifier
def _convert_to_integer(self, bytes_to_convert):
'Use bitwise operators to convert the bytes into integers.'
integer = None
for chunk in bytes_to_convert:
if not integer:
integer = chunk
else:
integer = integer << 8
integer = integer | chunk
return integer
def _get_device_identifier(self, identifier_byte):
'''Convert the identifier byte to a device identifier. Values are based
on the information from page 24 of the datasheet.
'''
if identifier_byte == 0x00 or identifier_byte == 0xFF:
return 'engineering sample'
elif identifier_byte == 0x0D:
return 'Si7013'
elif identifier_byte == 0x14:
return 'Si7020'
elif identifier_byte == 0x15:
return 'Si7021'
else:
return 'unknown'
def _verify_checksum(self, data):
        '''Verify the checksum using the polynomial from page 19 of the
datasheet.
x8 + x5 + x4 + 1 = 0x131 = 0b100110001
Valid Example:
byte1: 0x67 [01100111]
byte2: 0x8c [10001100]
byte3: 0xfc [11111100] (CRC byte)
'''
crc = 0
values = data[:2]
checksum = int(data[-1])
for value in values:
crc = crc ^ value
for _ in range(8, 0, -1):
if crc & 0x80: #10000000
crc <<= 1
crc ^= 0x131 #100110001
else:
crc <<= 1
if crc != checksum:
return False
else:
return True
def convert_celcius_to_fahrenheit(celcius):
    'Convert a Celsius measurement into a Fahrenheit measurement.'
return celcius * 1.8 + 32
def get_reading(sda_pin, scl_pin, vcc_pin=None):
""" Returns temperature/humidity/serial reading, for given I2C SCL/SDA and VCC pins """
from machine import I2C
sensors.set_sensor_power_on(vcc_pin)
# initialization & measurement
i2c = I2C(0, pins=(sda_pin, scl_pin))
# check for i2c scan
# method 1: run scan and wait for non-empty list: this seems to hang for more than 500 secs if power supply is not right
i2c_scan = i2c.scan()
temp = None
hum = None
# method 2 : read a dummy byte from device id 64 and if there is no device identified it will raise an exception. Seems much faster
try:
i2c.readfrom(0x40, 1) # here we assume our device has id 0x40 (64)
# if we reach this point we can go on with reading values
sensor = Si7021(i2c)
# get values
temp = sensor.temperature
hum = sensor.relative_humidity
except Exception as e:
logging.exception(e, "Exception raised in I2C {}")
# disable sensor and supply to sensor
i2c.deinit()
sensors.set_sensor_power_off(vcc_pin)
return(temp, hum)
|
insighio/insighioNode
|
insighioNode/lib/sensors/si7021.py
|
si7021.py
|
py
| 6,216 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "sensors.set_sensor_power_on",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "machine.I2C",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "sensors.set_sensor_power_off",
"line_number": 200,
"usage_type": "call"
}
] |
28205735632
|
poppler_path=r"C:\Users\ahmed\Downloads\New folder\Release-22.04.0-0\poppler-22.04.0\Library\bin"
pdf_path = r"D:\certifiaction\freelancing.pdf"
from pdf2image import convert_from_path
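# on Windows, pdf2image needs an explicit poppler_path; on Linux/macOS it is enough to have Poppler on PATH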
pages = convert_from_path(pdf_path=pdf_path, poppler_path=poppler_path)
import os
saving_folder=r"D:\certifiaction"
c=1
for page in pages:
img_name=f"img-{c}.png"
page.save(os.path.join(saving_folder,img_name),"png")
c+=1
|
ahmedsayed545/ahmedsayed545
|
convert pdf to img.py
|
convert pdf to img.py
|
py
| 438 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pdf2image.convert_from_path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
}
] |
41063841441
|
import logging
from threading import Thread
class BasicComponent(Thread):
"""
    Base class; more elaborate components inherit from it.
"""
def __init__(self, ID, dest_endpoint, mode, queue):
#Assert validity of the parameters received
endpoint_above, endpoint_below = dest_endpoint
if not (mode == "server" or mode == "client"):
raise Exception("Malformed input")
if not isinstance(ID, int) or \
not isinstance(endpoint_below, int) or \
not isinstance(endpoint_above, int):
raise Exception("Malformed input")
self.queue = queue
self.dest_above = endpoint_above
self.dest_below = endpoint_below
self.ID = ID
self.mode = mode
self.incomming_events = {}
self.condition = queue.condition
#Init the parent class
Thread.__init__(self)
def create_payload(self, content):
payload = {}
payload['event_ID'] = 1
payload['stream_ID'] = 1
payload['timestamp'] = 1.0
payload['valid_for'] = 1
payload['content'] = content
return payload
#Method to create an event
def create_event(self, event_type, endpoint, payload = "payload"):
if not (endpoint == self.dest_above or endpoint == self.dest_below):
raise Exception("Malformed input")
try:
event = self.queue.create_event(self.ID, endpoint, event_type)
event.set_payload(payload)
except Exception as e:
logging.error(str(e))
return event
#Method to send an event to the queue
def send_event(self, event):
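        # push the event under the queue's shared condition lock, then wake every waiting component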
self.condition.acquire()
rsp = self.queue.add(event)
self.condition.notify_all()
self.condition.release()
return rsp
#Method to process an event retrieved from the queue,
    #Must be implemented in the child class
def process_events(self):
pass
    #Method to get events; depends on what this component does (peek, get, ...)
def get_events(self):
empty = True
prev = self.queue.preview()
for key, value in prev.items():
if value.dest_ID == self.ID:
self.incomming_events[key] = self.queue.get(self.ID, key)
empty = False
return empty
#Method used to create the thread for a component
def run(self):
while True:
self.condition.acquire()
#Loop until it get events related to its component ID
while self.get_events():
#Wait to receive a notification for a change in the queue
self.condition.wait()
#Events received, processing
try:
self.process_events()
except Exception as e:
logging.warning(str(e))
#Release the underlying lock
self.condition.release()
if __name__ == '__main__':
pass
|
ylaker/tweakable-pt
|
components/Base.py
|
Base.py
|
py
| 2,995 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "threading.Thread",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 89,
"usage_type": "call"
}
] |
22676158140
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
import selectivesearch
from predictors import resnet152
from utils import selectors, nms
import xml.etree.ElementTree as ET
import random
boxes = {}
def getImgReady(img, show=False):
if img is None:
return None
if show:
plt.imshow(img)
plt.axis('off')
# convert into format (batch, RGB, width, height)
img = cv2.resize(img, (224, 224))
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = img[np.newaxis, :]
return img
def addBox(x, y, w, h, prob, label):
x1 = x
x2 = x + w
y1 = y
y2 = y + h
    if label not in boxes:  # dict.has_key() is Python 2 only; "in" also works on Python 3
boxes[label] = [[x1, y1, x2, y2, prob]]
else:
boxes[label].append([x1, y1, x2, y2, prob])
def drawGroundTruth(imgName, img):
xmlTree = ET.parse(annotationsDir + '{}.xml'.format(imgName.split('.')[0])) # reads corresponding XML file
for object in xmlTree.findall('object'):
name = object.find('name').text
name = nameDict[name]
bndbox = object.find('bndbox')
xmin = int(float(bndbox.find('xmin').text)) #reads coordinates
ymin = int(float(bndbox.find('ymin').text))
xmax = int(float(bndbox.find('xmax').text))
ymax = int(float(bndbox.find('ymax').text))
rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
fill=False, edgecolor=(0, 1, 0), linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(xmin, ymin - 2, 'Ground Truth:{:s}'.format(name),
bbox=dict(facecolor=(0, 1, 0), alpha=0.5), fontsize=12, color='white')
def getNameDict(filename):
dic = {}
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip('\n')
key = line.split(' ')[0]
val = line.split(' ')[1]
dic[key] = val
return dic
annotationsDir = '../Data/ImageNet/ILSVRC2012/val-Annotations/'
imgsDir = '../Data/ImageNet/ILSVRC2012/img_val/'
nameDict = getNameDict('../synset.txt')
imgName = selectors.selectImg(imgsDir)
imgPath = imgsDir + imgName
img = cv2.cvtColor(cv2.imread(imgPath), cv2.COLOR_BGR2RGB)
plt.figure(imgName.split('.')[0])
plt.imshow(img)
plt.axis('off')
img_label, regions = selectivesearch.selective_search(img, scale = 500, sigma = 0.9, min_size = 500)
for i, region in enumerate(regions): #rect:x y w h
x = region['rect'][0]
y = region['rect'][1]
w = region['rect'][2]
h = region['rect'][3]
croppedImg = img[y:y + h,x:x + w]
croppedImg = getImgReady(croppedImg)
prob, label = resnet152.predict(croppedImg)
if prob < 0.2: #ignore low probability boxes
continue
addBox(x, y, w, h, prob, label)
for label in boxes:
color = (random.random(), random.random(), random.random())
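    # per-label non-maximum suppression; 0.3 is presumably the overlap (IoU) threshold of the custom nms helper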
indexes = nms.nms(np.array(boxes[label]), 0.3)
for i in indexes:
x1 = boxes[label][i][0]
y1 = boxes[label][i][1]
x2 = boxes[label][i][2]
y2 = boxes[label][i][3]
prob = boxes[label][i][4]
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(x1, y1 - 2, '{:s} {:.3f}'.format(label, prob),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
drawGroundTruth(imgName, img)
plt.show()
|
juvu/ImageSearch
|
test/testPredictorInterface.py
|
testPredictorInterface.py
|
py
| 3,482 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "cv2.resize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.swapaxes",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.swapaxes",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Rectangle",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "utils.selectors.selectImg",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "utils.selectors",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "selectivesearch.selective_search",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "predictors.resnet152.predict",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "predictors.resnet152",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "utils.nms.nms",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "utils.nms",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.Rectangle",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
}
] |
39654499374
|
import os
import glob
import PIL.Image as I
def load_cityscapes_sem_seg_dict(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
suffix = "leftImg8bit.png"
assert image_file.endswith(suffix)
prefix = image_dir
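        # the gtFine label file shares the image's relative path, with the suffix
        # leftImg8bit.png swapped for gtFine_labelTrainIds.png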
label_file = gt_dir + image_file[len(prefix): -len(suffix)] + "gtFine_labelTrainIds.png"
assert os.path.isfile(
label_file
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"
w, h = I.open(label_file).size
ret.append(
{
'file_name': image_file,
'sem_seg_file_name': label_file,
'height': h,
'width': w
}
)
return ret
|
lqxisok/llSeg
|
datasets/segmentation/cityscapes.py
|
cityscapes.py
|
py
| 1,152 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "glob.glob",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 26,
"usage_type": "name"
}
] |
30273124330
|
from django.contrib.auth.backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.contrib.auth import get_user_model
class GroupEnabledRemoteUserBackend(RemoteUserBackend):
def authenticate(self, user, remote_groups):
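        # note: this does not follow RemoteUserBackend's standard authenticate(request, remote_user)
        # signature, so it must be invoked explicitly with the user and their remote groups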
user.remote_groups = remote_groups
user.is_superuser = self._is_superuser(user)
user.is_staff = self._is_staff(user)
return user
def _is_superuser(self, user):
superuser_groups = getattr(settings, 'CUMULUS_SUPERUSER_GROUPS', [])
superusers = getattr(settings, 'CUMULUS_SUPERUSERS', [])
result = user.username in superusers or \
set(user.remote_groups).intersection(set(superuser_groups))
return bool(result)
def _is_staff(self, user):
staff_groups = getattr(settings, 'CUMULUS_STAFF_GROUPS', [])
staff_users = getattr(settings, 'CUMULUS_STAFF_USERS', [])
result = user.username in staff_users or \
set(user.remote_groups).intersection(set(staff_groups))
return bool(result)
|
jcmcken/cloudcover-cumulus
|
cumulus/backends.py
|
backends.py
|
py
| 1,110 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.backends.RemoteUserBackend",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.conf.settings",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 22,
"usage_type": "argument"
}
] |
39938022567
|
#%%
import nltk
import vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import numpy as np
import json
import glob
import errno
import pandas
import random
import requests
from bs4 import BeautifulSoup
import news_web_scraping as NWS
#%%
# FUNCTIONS
# input: text (string)
# output: average sentiment of the text and a list containing a tuple of each sentence with its corresponding sentiment
# sentiment is rating between -4 (EXTREAMLY negative) and 4 (EXTREAMLY positive)
def calcSentiment(text):
analyzer = SentimentIntensityAnalyzer()
sentence_list = nltk.sent_tokenize(text)
sentiment_list = []
if not sentence_list:
return sentence_list, sentiment_list
#raise ValueError('no sentences in the text given -> 0/0')
for sentence in sentence_list:
vs = analyzer.polarity_scores(sentence)
compound_sentiment = round(vs["compound"]*4, 4)
sentiment_list = sentiment_list + [compound_sentiment]
return sentence_list, sentiment_list
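# Illustrative example (scores depend on the VADER lexicon, so treat the values
# as indicative only): calcSentiment("I love this. I hate that.") returns the
# two sentences plus per-sentence compound scores scaled to [-4, 4], one
# positive and one negative.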
def get_json_text(json_file):
    json_string = json_file.read()
    json_dict = json.loads(json_string)
    return json_dict["text"]

def split_toParagraphs(article_text):
    paragraph_list = article_text.split("\n")
    paragraph_list = [paragraph.strip(' ') for paragraph in paragraph_list]
    paragraph_list = list(filter(None, paragraph_list))
    return paragraph_list

def paragraph_analysis(paragraph_list):
    all_para_sentiments = []
    all_para_sentences = []
    for paragraph in paragraph_list:
        para_sentences, para_sentiments = calcSentiment(paragraph)
        if para_sentiments:
            para_sentiment_avg = np.mean(np.asarray(para_sentiments))
            all_para_sentiments = all_para_sentiments + [para_sentiment_avg]
            all_para_sentences = all_para_sentences + [para_sentences]
    return all_para_sentences, all_para_sentiments
# performs sentiment analysis on each article in a list of articles
def article_list_analysis(article_list):
    article_sentence_list = []
    sentence_sentiment_list = []
    avg_article_sentiment_list = []
    for article_text in article_list:
        if article_text:
            article_text = str(article_text)
            sentence_list, sentiment_list = calcSentiment(article_text)
            if sentiment_list:
                article_sentiment = np.mean(np.asarray(sentiment_list))
                article_sentence_list = article_sentence_list + [sentence_list]
                sentence_sentiment_list = sentence_sentiment_list + [sentiment_list]
                avg_article_sentiment_list = avg_article_sentiment_list + [article_sentiment]
    return article_sentence_list, sentence_sentiment_list, avg_article_sentiment_list
def politi_buzz_analysis(path):
    files = glob.glob(path)
    article_list = []
    for file_name in files:
        try:
            with open(file_name, 'r') as json_file:
                article_text = get_json_text(json_file)
                article_list = article_list + [article_text]
        except IOError as exc:
            if exc.errno != errno.EISDIR:
                raise
    return article_list_analysis(article_list)

def BBC_analysis(path):
    files = glob.glob(path)
    article_list = []
    for file_name in files:
        try:
            with open(file_name, 'r') as txt_file:
                article_text = txt_file.read()
                article_list = article_list + [article_text]
        except IOError as exc:
            if exc.errno != errno.EISDIR:
                raise
    return article_list_analysis(article_list)
def kaggle_Fact_Fake_analysis(path):
    df = pandas.read_csv(path)
    fake_df = df.loc[df['label'] == 1]
    real_df = df.loc[df['label'] == 0]
    article_list_fake = fake_df['text'].values.tolist()
    article_list_real = real_df['text'].values.tolist()
    article_list_fake = random.sample(article_list_fake, 100) # needed as 13000 takes too long
    article_list_real = random.sample(article_list_real, 100) # needed as 13000 takes too long
    fake_sentence_list, sentence_sentiment_fake_list, article_sentiment_fake_list = article_list_analysis(article_list_fake)
    real_sentence_list, sentence_sentiment_real_list, article_sentiment_real_list = article_list_analysis(article_list_real)
    return article_sentiment_fake_list, fake_sentence_list, sentence_sentiment_fake_list, article_sentiment_real_list, real_sentence_list, sentence_sentiment_real_list

def avg_var_calculation(sentiment_list, factOrFake, news_source):
    sentiment_array = np.asarray(sentiment_list)
    sentiment_avg = np.mean(sentiment_array)
    sentiment_var = np.var(sentiment_array)
    print(factOrFake + " article avg " + news_source + ": " + str(sentiment_avg))
    print(factOrFake + " article var " + news_source + ": " + str(sentiment_var))
    return sentiment_array, sentiment_avg, sentiment_var

def cal_article_abs_sentiment(list_list):
    mean_list = []
    for sub_list in list_list:
        mean = np.mean(np.absolute(np.asarray(sub_list)))
        mean_list = mean_list + [mean]
    return np.asarray(mean_list)
def kaggle_mult_news_analysis(path, publication):
    df = pandas.read_csv(path)
    df_publication = df.loc[df['publication'] == publication]
    article_list = df_publication['content'].values.tolist()
    article_list = random.sample(article_list, 100) # needed as otherwise it takes too long
    sentence_list, sentence_sentiments, article_sentiments = article_list_analysis(article_list)
    return sentence_list, sentence_sentiments, article_sentiments

#def kaggle_Fake_analysis(path):
#    df = pandas.read_csv(path)
#    article_list = df['text'].values.tolist()
#    art_sub_list = random.sample(article_list, 100) # needed as 13000 takes too long
#    sentence_list, sentence_sentiment_list, article_sentiment_tot_list = article_list_analysis(art_sub_list)
#    return article_sentiment_tot_list, sentence_list, sentence_sentiment_list

# scrapes NYT articles and performs sentiment analysis on them
def NYT_scrape_SA(url):
    title_list, article_list = NWS.NYT_page_scrape(url)
    sentence_list, sentence_sentiments, article_sentiments = article_list_analysis(article_list)
    return title_list, sentence_list, sentence_sentiments, article_sentiments
|
OrionMat/Sentiment-Analysis
|
newsSentimentAnalysis.py
|
newsSentimentAnalysis.py
|
py
| 6,297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "nltk.sent_tokenize",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "errno.EISDIR",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "errno.EISDIR",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.var",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "news_web_scraping.NYT_page_scrape",
"line_number": 143,
"usage_type": "call"
}
] |
18399962026
|
# from autogluon.tabular import TabularDataset, TabularPredictor
# train_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
# f = open('/workfile.txt', 'r+')
# f.write(train_data)
# subsample_size = 500 # subsample subset of data for faster demo, try setting this to much larger values
# train_data = train_data.sample(n=subsample_size, random_state=0)
# train_data.head()
# label = 'class'
# print("Summary of class variable: \n", train_data[label].describe())
# save_path = 'agModels-predictClass' # specifies folder to store trained models
# predictor = TabularPredictor(label=label, path=save_path).fit(train_data)
# test_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')
# y_test = test_data[label] # values to predict
# test_data_nolab = test_data.drop(columns=[label]) # delete label column to prove we're not cheating
# test_data_nolab.head()
# predictor = TabularPredictor.load(save_path) # unnecessary, just demonstrates how to load previously-trained predictor from file
# y_pred = predictor.predict(test_data_nolab)
# test_data_nolab[label] = y_pred
# test_data_nolabel.to_csv(filelist[3], encoding='utf-8', index=False)
# print("Predictions: \n", y_pred)
# perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred, auxiliary_metrics=True)
# import os
# import os.path
# from os import path
# # import aif360.datasets.compas_dataset
# # filelist = ['workfile.csv', 'workfile_test.csv', 'workfile_train.csv', 'workfile_test_predictions.csv']
# # for file in filelist:
# # if path.isfile(file):
# # os.remove(file)
# import pandas as pd
# # protected_attributes = ['age', 'sex', 'race', 'gender', 'ethnicity',
# # 'marital status', 'religion', 'national origin',
# # 'public assistance', 'disability', 'pregnancy', 'maternity']
# # df = pd.read_csv('workfile.csv')
# # print(list(df))
# # present_protected_attributes = []
# # for attr in df:
# # for prot in protected_attributes:
# # if prot in attr:
# # present_protected_attributes.append(attr)
# # print(present_protected_attributes)
from aif360.datasets import StandardDataset
from aif360.metrics import BinaryLabelDatasetMetric
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
from itertools import permutations
label = 'decile_score'
# protected_attr = 'sex'
# priveledged = 'Female'
# unpriveledged = 'Male'
df = pd.read_csv('compas-scores-low-med-high.csv')
# df[label].mask(df[label] <= 4, 0, inplace=True)
# df[label].mask(df[label] >= 5, 1, inplace=True)
# print(df.head())
# df.fillna(0)
# # # now that we have the binary encoding for scores we can calculate bias metrics
oe = OrdinalEncoder()
df[["sex", "age_cat", "race", "c_days_from_compas", "c_charge_degree", "c_charge_desc"]] = oe.fit_transform(
    df[["sex", "age_cat", "race", "c_days_from_compas", "c_charge_degree", "c_charge_desc"]])
df.to_csv("compas-scores-orig-processed.csv", encoding='utf-8', index=False)
df[label].mask(df[label] == 0, 0, inplace=True)
df[label].mask(df[label] >= 2, 1, inplace=True)
df = df.fillna(0)
# print(df.head())
# dataset = StandardDataset(df,
# label_name=label,
# favorable_classes=[0],
# protected_attribute_names=[protected_attr],
# privileged_classes=[[0]])
# privileged_groups = [{protected_attr: 0}]
# unprivileged_groups = [{protected_attr: 1}]
# metric_orig_train = BinaryLabelDatasetMetric(dataset,
# unprivileged_groups=unprivileged_groups,
# privileged_groups=privileged_groups)
# print(metric_orig_train.mean_difference())
# print(metric_orig_train.disparate_impact())
# if not path.isfile('woo.csv'):
# print('it works')
# from flask import Flask, render_template, request
# from autogluon.tabular import TabularPredictor
import pandas as pd
from sklearn.model_selection import train_test_split
# import json
# import os
# import shutil
import os.path
# from os import path
import itertools
# label = 'decile_score'
# df = pd.read_csv('compas-scores-orig2.csv')
# make all headers lowercase
# df.columns = df.columns.str.lower()
# # make client label input all lowercase
# label = label.lower()
# df[label].mask(df[label] <= 4, 0, inplace=True)
# df[label].mask(df[label].between(5,7), 1, inplace=True)
# df[label].mask(df[label] >= 8, 2, inplace=True)
# df.fillna(0)
# split the csv into train and test data
# train_data, test_data = train_test_split(df, test_size=0.2)
# # save test and train csv's
# df.to_csv('low_medium_high.csv', encoding='utf-8', index=False)
# models_dir = 'agModels-predictClass'
# predictor = TabularPredictor(label=label, path=models_dir).fit(
# train_data=train_data, presets='best_quality')
# y_test = test_data[label] # values to predict
# # delete label column
# test_data_nolabel = test_data.drop(columns=[label])
# # predict
# y_pred = predictor.predict(test_data_nolabel)
# save predictions
# test_data = test_data_nolabel
# test_data_nolabel[label] = y_pred
# test_data_nolabel.to_csv('compas-test.csv',
# encoding='utf-8', index=False)
# get random row like in the code
# df = test_data.sample()
# get protected attributes permutations
# protected_attributes = ['age_cat', 'sex', 'race']
# need to make a new df with all permutations of 3 columns
# sex_perms = ['Male', 'Female']
# age_cat_perms = ['Greater than 45', '25-45', 'Less than 25']
# race_perms = ['Other', 'African-American',
# 'Hispanic', 'Asian', 'Native American']
# protected_attributes = [sex_perms, age_cat_perms, race_perms]
# all_permutations = list(itertools.product(*protected_attributes))
# print(all_permutations)
# new_df = df.reset_index()
# new_dict = new_df.to_dict('records')
# my_dict = new_dict[0]
# del my_dict['index']
# output = pd.DataFrame()
# # create new df with all permutations
# for permutation in all_permutations:
# my_dict['sex'] = permutation[0]
# my_dict['age_cat'] = permutation[1]
# my_dict['race'] = permutation[2]
# print(my_dict)
# output = output.append(my_dict, ignore_index=True)
# print(output.head())
# predictor = TabularPredictor.load(models_dir)
# y_pred = predictor.predict(output)
# # append predictions
# output[label] = y_pred
# # save
# output.to_csv('permutations_csv',
# encoding='utf-8', index=False)
# protected attributes who are the most unpriveledged overall
total_dict_unpriveledged_mean = {}
# protected attributes who have the highest benefit overall
total_dict_higher_benefit = {}
protected_attributes = ['age_cat', 'sex', 'race']
processed_data_sex = {'Male': 1, 'Female': 0}
processed_data_age_cat = {'age_cat_greater_than_45': 1,
                          'age_cat_25_to_45': 0, 'age_cat_less_than_25': 2}
processed_data_race = {'African-American': 0, 'Asian': 1,
                       'Caucasian': 2, 'Hispanic': 3, 'Native American': 4, 'Other': 5}
dataset = StandardDataset(df,
                          label_name=label,
                          # 0 represents low recidivism score
                          favorable_classes=[0],
                          protected_attribute_names=['sex'],
                          # for the privileged class we will use our dict
                          privileged_classes=[[0]])  # Female
# Again using our dict
privileged_groups = [{'sex': 0}]  # Female
unprivileged_groups = [{'sex': 1}]  # Male
metric_orig_train = BinaryLabelDatasetMetric(dataset,
                                             unprivileged_groups=unprivileged_groups,
                                             privileged_groups=privileged_groups)
# A negative value indicates less favorable outcomes for the unprivileged groups.
mean_diff = metric_orig_train.mean_difference()
# The ideal value of this metric is 1.0 A value < 1 implies higher benefit for the privileged group and a
# value >1 implies a higher benefit for the unprivileged group.
disparate_impact = metric_orig_train.disparate_impact()
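# Illustrative arithmetic (numbers assumed, not taken from this dataset): if 45%
# of the unprivileged group and 60% of the privileged group receive the
# favorable label, then mean_difference = 0.45 - 0.60 = -0.15 and
# disparate_impact = 0.45 / 0.60 = 0.75.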
if mean_diff < 0:
    sex_unpriveledged_mean = (list(processed_data_sex.keys())[list(processed_data_sex.values()).index(1)])
    total_dict_unpriveledged_mean[sex_unpriveledged_mean] = mean_diff
    key = "{}_mean : {}".format(sex_unpriveledged_mean, mean_diff)
    print(key)
if disparate_impact < 1:
    higher_benefit_priveledged = (list(processed_data_sex.keys())[list(processed_data_sex.values()).index(0)])
    total_dict_higher_benefit[higher_benefit_priveledged] = disparate_impact
    key = "{}_disparate_benefit: {}".format(higher_benefit_priveledged, disparate_impact)
    print(key)
# if disparate_impact > 1:
#     higher_benefit_unpriveledged = (list(processed_data_sex.keys())[list(processed_data_sex.values()).index(1)])
#     total_dict_higher_benefit[higher_benefit_unpriveledged] = disparate_impact
#     key = "{}_disparate_benefit: {}".format(higher_benefit_unpriveledged, disparate_impact)
#     print(key)
permutations_age_cat = list(permutations(range(3), 2))
lowest_mean_diff = 0
lowest_disparate_impact = 1
mean_list_unpriveledged = []
disparate_impact_list_benefit = []
for combination in permutations_age_cat:
    dataset = StandardDataset(df,
                              label_name=label,
                              # 0 represents low recidivism score
                              favorable_classes=[0],
                              protected_attribute_names=['age_cat'],
                              # for the privileged class we will use our dict
                              privileged_classes=[[combination[0]]])
    # Again using our dict
    privileged_groups = [{'age_cat': combination[0]}]
    unprivileged_groups = [{'age_cat': combination[1]}]
    metric_orig_train = BinaryLabelDatasetMetric(dataset,
                                                 unprivileged_groups=unprivileged_groups,
                                                 privileged_groups=privileged_groups)
    mean_diff = metric_orig_train.mean_difference()
    disparate_impact = metric_orig_train.disparate_impact()
    if mean_diff < 0 and mean_diff < lowest_mean_diff:
        least_priveledged = combination[1]
        lowest_mean_diff = mean_diff
        age_unpriveledged = (list(processed_data_age_cat.keys())[list(processed_data_age_cat.values()).index(least_priveledged)])
        mean_list_unpriveledged.append(age_unpriveledged)
    if disparate_impact < 1 and disparate_impact < lowest_disparate_impact:
        highest_priveledged = combination[0]
        lowest_disparate_impact = disparate_impact
        higher_benefit_priveledged = (list(processed_data_age_cat.keys())[list(processed_data_age_cat.values()).index(highest_priveledged)])
        disparate_impact_list_benefit.append(higher_benefit_priveledged)
age_unpriveledged = (list(processed_data_age_cat.keys())[list(processed_data_age_cat.values()).index(least_priveledged)])
total_dict_unpriveledged_mean[age_unpriveledged] = lowest_mean_diff
key = "{}_mean : {}".format(age_unpriveledged, lowest_mean_diff)
print(key)
higher_benefit_priveledged = (list(processed_data_age_cat.keys())[list(processed_data_age_cat.values()).index(highest_priveledged)])
total_dict_higher_benefit[higher_benefit_priveledged] = lowest_disparate_impact
key = "{}_disparate_benefit: {}".format(higher_benefit_priveledged, lowest_disparate_impact)
print(key)
age_list = ['age_cat_greater_than_45', 'age_cat_25_to_45', 'age_cat_less_than_25']
age_mean_least_counts = {}
for val in age_list:
    c = mean_list_unpriveledged.count(val)
    age_mean_least_counts[val] = c
# To return
age_highest_benefit_counts = {}
for val in age_list:
    c = disparate_impact_list_benefit.count(val)
    age_highest_benefit_counts[val] = c
# print((list(processed_data_age_cat.keys())[list(processed_data_age_cat.values()).index(least_priveledged)]))
# print((list(processed_data_age_cat.keys())[list(processed_data_age_cat.values()).index(compared_to)]))
# print(worst)
permutations_race = list(permutations(range(6), 2))
# reset values
lowest_mean_diff = 0
lowest_disparate_impact = 1
mean_list_unpriveledged_race = []
disparate_impact_list_benefit_race = []
for combination in permutations_race:
    dataset = StandardDataset(df,
                              label_name=label,
                              # 0 represents low recidivism score
                              favorable_classes=[0],
                              protected_attribute_names=['race'],
                              # for the privileged class we will use our dict
                              privileged_classes=[[combination[0]]])
    # Again using our dict
    privileged_groups = [{'race': combination[0]}]
    unprivileged_groups = [{'race': combination[1]}]
    metric_orig_train = BinaryLabelDatasetMetric(dataset,
                                                 unprivileged_groups=unprivileged_groups,
                                                 privileged_groups=privileged_groups)
    mean_diff = metric_orig_train.mean_difference()
    disparate_impact = metric_orig_train.disparate_impact()
    if mean_diff < 0:
        if mean_diff < lowest_mean_diff:
            least_priveledged = combination[1]
            lowest_mean_diff = mean_diff
        race_unpriveledged = (list(processed_data_race.keys())[list(processed_data_race.values()).index(combination[1])])
        mean_list_unpriveledged_race.append(race_unpriveledged)
    if disparate_impact < 1:
        if disparate_impact < lowest_disparate_impact:
            highest_priveledged = combination[0]
            lowest_disparate_impact = disparate_impact
        higher_benefit_priveledged = (list(processed_data_race.keys())[list(processed_data_race.values()).index(combination[0])])
        disparate_impact_list_benefit_race.append(higher_benefit_priveledged)
race_unpriveledged = (list(processed_data_race.keys())[list(processed_data_race.values()).index(least_priveledged)])
total_dict_unpriveledged_mean[race_unpriveledged] = lowest_mean_diff
higher_benefit_priveledged = (list(processed_data_race.keys())[list(processed_data_race.values()).index(highest_priveledged)])
total_dict_higher_benefit[higher_benefit_priveledged] = lowest_disparate_impact
race_list = ['African-American', 'Asian', 'Caucasian', 'Hispanic', 'Native American', 'Other']
race_mean_least_counts = {}
for val in race_list:
    c = mean_list_unpriveledged_race.count(val)
    race_mean_least_counts[val] = c
# To return
race_highest_benefit_counts = {}
for val in race_list:
    c = disparate_impact_list_benefit_race.count(val)
    race_highest_benefit_counts[val] = c
print("\nAge Category: unpriveledged occurences - mean diffence")
print(age_mean_least_counts)
print("\nAge Category: higher benefit occurrences - disparate impact")
print(age_highest_benefit_counts)
print("\nRace: unpriveledged occurences - mean diffence")
print(race_mean_least_counts)
print("\nRace: higher benefit occurrences - disparate impact")
print(race_highest_benefit_counts)
print("\nTotal: Highest benefitting attribute - disparate impact")
print(total_dict_higher_benefit)
print("\nTotal: Most Unpriveledged attribute - mean difference")
print(total_dict_unpriveledged_mean)
# print((list(processed_data_race.keys())[list(processed_data_race.values()).index(least_priveledged)]))
# print((list(processed_data_race.keys())[list(processed_data_race.values()).index(compared_to)]))
# # print(worst_mean)
# print(least_priveledged_list)
# print(max(set(least_priveledged_list), key=least_priveledged_list.count))
# print("Most unpriveledged to Least")
# for i in range (5):
# print(least_priveledged_list.count(i))
# for unpriv in least_priveledged_list:
# print((list(processed_data_race.keys())[list(processed_data_race.values()).index(unpriv)]))
dataset = StandardDataset(df,
                          label_name=label,
                          # 0 represents low recidivism score
                          favorable_classes=[0],
                          protected_attribute_names=['race'],
                          # for the privileged class we will use our dict
                          privileged_classes=[[0]])
# Again using our dict
privileged_groups = [{'race': 0}]
unprivileged_groups = [{'race': 4}]
metric_orig_train = BinaryLabelDatasetMetric(dataset,
                                             unprivileged_groups=unprivileged_groups,
                                             privileged_groups=privileged_groups)
mean_diff = metric_orig_train.mean_difference()
disparate_impact = metric_orig_train.disparate_impact()
print(mean_diff)
# from itertools import permutations
# all_combinations = list( permutations( range( 6 ), 2 ) )
# print(all_combinations)
|
kehwhy/HAI-analysis-tool
|
back-end/test_api_logic.py
|
test_api_logic.py
|
py
| 16,932 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OrdinalEncoder",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "aif360.datasets.StandardDataset",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "aif360.metrics.BinaryLabelDatasetMetric",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "aif360.datasets.StandardDataset",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "aif360.metrics.BinaryLabelDatasetMetric",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "aif360.datasets.StandardDataset",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "aif360.metrics.BinaryLabelDatasetMetric",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "aif360.datasets.StandardDataset",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "aif360.metrics.BinaryLabelDatasetMetric",
"line_number": 390,
"usage_type": "call"
}
] |
38424940724
|
#!/usr/bin/env python3
import cv2
import numpy as np
import dlib
import Facerecognize as fr
font=cv2.FONT_ITALIC
cap=cv2.VideoCapture(0)
detector=dlib.get_frontal_face_detector()
face_data,face_id,dic=fr.training_dataset_and_labels()
recognizer=cv2.face.LBPHFaceRecognizer_create()
recognizer.train(face_data,np.array(face_id))
while cap.isOpened():
    status, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        value, confidence = recognizer.predict(gray[y1:y2, x1:x2])
        text = dic[str(value)]
        if confidence < 100:
            cv2.putText(frame, text, (x1, y1), font, 2, (255, 0, 0), 4)
            #print(value)
            #print(confidence)
    cv2.imshow("live", frame)
    if cv2.waitKey(30) & 0xff == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
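# Note: LBPH's "confidence" is a distance, so lower means a better match; the
# `confidence < 100` check above is permissive. A stricter cutoff (e.g. 60, an
# assumed value) would reduce false positives at the cost of more misses.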
|
prem-pratap/Face-Recognition-System
|
reco.py
|
reco.py
|
py
| 982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.FONT_ITALIC",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "Facerecognize.training_dataset_and_labels",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.face.LBPHFaceRecognizer_create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.face",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 32,
"usage_type": "call"
}
] |
31188466771
|
import discord
from discord.ext import commands
import datetime
import nekos
import asyncio
import wikipedia
import pyowm
import os
from discord import utils
from discord.ext.commands import Bot
import urllib.parse
import re
import json
import io
import requests
import random
import time
from Cybernator import Paginator
import COVID19Py
Bot = commands.Bot(command_prefix='Lt!')
Bot.remove_command('help')
def postfix(num:int, end_1:str='год', end_2:str='года', end_3:str='лет'):
    num = num % 10 if num > 20 else num  # for numbers above 20, keep only the last digit (num % 10); otherwise use the number as-is
    return end_1 if num == 1 else end_2 if 1 < num < 5 else end_3  # then simply pick the matching Russian plural ending
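# For example, with the default endings (Russian plural forms of "year"):
# postfix(1) -> 'год', postfix(3) -> 'года', postfix(7) -> 'лет', postfix(21) -> 'год'.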
@Bot.event
async def on_ready():
    print('Ну я тут')
    while True:
        await Bot.change_presence(activity= discord.Activity(name=' на сервер Лататы', type= discord.ActivityType.watching))
        await asyncio.sleep(10)
        await Bot.change_presence(activity= discord.Game("Команда помощи Lt!help"))
        await asyncio.sleep(10)
@Bot.command()
async def help(ctx):
    embed1 = discord.Embed(title="Привет это все команды", description='Нажимай на стрелки и смотри команды')
    embed2 = discord.Embed(title="Эмоции", description='Lt!pat (Человек) Погладить \n Lt!hug (Человек) Обнять \n Lt!slap (Человек) ударить \n Lt!kiss (Человек) поцеловать')
    embed3 = discord.Embed(title="Игры", description='Lt!knb Сыграть в камень ножницы бумага \n Lt!wea (Город) Погода в городе \n Lt!wiki (запрос) Вики-Вики! \n Lt!ball (Запрос) спросить у шара \n Lt!gl (Запрос) гуглю за вас \n Lt!cat Котики! \n Lt!messages (Человек) посмотреть сколько у тебя сообщений' )
    embed4 = discord.Embed(title="Другое", description='Lt!ava (Человек) посмотреть аву себя или человека \n Lt!ran_ava Аниме ава \n Lt!profile Свой профиль или другое \n Lt!serverinfo Инфо о сервере \n Lt!covid (Название страны в коде из двух букв) Инфо о COVID-19 ')
    embeds = [embed1, embed2, embed3, embed4]
    message = await ctx.send(embed=embed1)
    page = Paginator(Bot, message, only=ctx.author, use_more=False, embeds=embeds)
    await page.start()
@Bot.command()  # command decorator
async def ava(ctx, member : discord.Member = None):  # command name and arguments
    user = ctx.message.author if member == None else member  # fall back to the command author when no member is given
    emb = discord.Embed(  # embed variable
        title=f'Аватар пользователя {user}',  # fill in the title
        description= f'[Ссылка на изображение]({user.avatar_url})',  # fill in the description
        color=user.color  # set the colour
    )
    emb.set_image(url=user.avatar_url)  # set the image
    await ctx.send(embed=emb)  # send the embed
@Bot.command()
async def knb(ctx):
    solutions = ['✂️', '🧱', '📄']
    winner = "**НИЧЬЯ**"
    msg = await ctx.send('Выберите ход :')
    for r in solutions:
        await msg.add_reaction(r)
    try:
        react, user = await Bot.wait_for('reaction_add', timeout= 30.0, check= lambda react, user: user == ctx.author and react.message.channel == ctx.channel and react.emoji in solutions)
    except asyncio.TimeoutError:
        await ctx.send('Время вышло')
        await msg.delete()
        await ctx.message.delete()
    else:
        p1 = solutions.index(f'{react.emoji}')
        p2 = random.randint(0, 2)
        if p1 == 0 and p2 == 1 or p1 == 1 and p2 == 2 or p1 == 2 and p2 == 0:
            winner = f"{ctx.message.author.mention} ты **Проиграл**"
        elif p1 == 1 and p2 == 0 or p1 == 2 and p2 == 1 or p1 == 0 and p2 == 2:
            winner = f"{ctx.message.author.mention} ты **Выиграл**"
        await ctx.send(
            f"{ctx.message.author.mention} **=>** {solutions[p1]}\n"
            f"{Bot.user.mention} **=>** {solutions[p2]}\n"
            f"{winner}")
        await msg.delete()
        await ctx.message.delete()
@Bot.command()  # command decorator
async def ran_ava(ctx):  # command name
    emb = discord.Embed(description= 'Вот подобранная Вам аватарка.')  # embed variable with its description
    emb.set_image(url=nekos.img('avatar'))  # use the nekos library to fetch an avatar-themed picture and put it in the embed
    await ctx.send(embed=emb)  # send the embed
@Bot.command()  # command decorator
async def kiss(ctx, member : discord.Member):  # command name and argument
    if member == ctx.message.author:  # check who was mentioned
        await ctx.send('Вы не можете поцеловать сами себя.')
    else:
        emb = discord.Embed(description= f'{member.mention}, Вас поцеловал(а) {ctx.message.author.mention}.')  # embed variable and description
        emb.set_image(url=nekos.img('kiss'))  # find a picture and put it in the embed
        await ctx.send(embed=emb)  # send the embed
#work
@Bot.command()  # command decorator
async def hug(ctx, member : discord.Member):  # command name and argument
    if member == ctx.message.author:  # check who was mentioned
        await ctx.send('Вы не можете обнять сами себя.')
    else:
        emb = discord.Embed(description= f'{member.mention}, Вас обнял(а) {ctx.message.author.mention}.')  # embed variable and description
        emb.set_image(url=nekos.img('hug'))  # find a picture and put it in the embed
        await ctx.send(embed=emb)  # send the embed
#work
@Bot.command()  # command decorator
async def slap(ctx, member : discord.Member):  # command name and argument
    if member == ctx.message.author:  # check who was mentioned
        await ctx.send('Вы не можете ударить сами себя.')
    else:
        emb = discord.Embed(description= f'{member.mention}, Вас ударил(а) {ctx.message.author.mention}.')  # embed variable and description
        emb.set_image(url=nekos.img('slap'))  # find a picture and put it in the embed
        await ctx.send(embed=emb)  # send the embed
#work
@Bot.command()  # command decorator
async def pat(ctx, member : discord.Member):  # command name and argument
    if member == ctx.message.author:  # check who was mentioned
        await ctx.send('Вы не можете погладить сами себя.')
    else:
        emb = discord.Embed(description= f'{member.mention}, Вас погладил(а) {ctx.message.author.mention}.')  # embed variable and description
        emb.set_image(url=nekos.img('pat'))  # find a picture and put it in the embed
        await ctx.send(embed=emb)  # send the embed
@Bot.command()  # command decorator
async def profile(ctx, userf: discord.Member = None):  # command name and argument
    user = ctx.message.author if userf == None else userf  # fall back to the command author when no user is given
    status = user.status  # get the status
    if user.is_on_mobile() == True: stat = 'На телефоне'  # mobile takes priority over the generic statuses
    elif status == discord.Status.online: stat = 'В сети'  # check the status and set the label
    elif status == discord.Status.offline: stat = 'Не в сети'
    elif status == discord.Status.idle: stat = 'Не активен'
    elif status == discord.Status.dnd: stat = 'Не беспокоить'
    create_time = (datetime.datetime.today()-user.created_at).days  # days since the Discord account was created
    join_time = (datetime.datetime.today()-user.joined_at).days  # days since joining the server
    emb = discord.Embed(title='Профиль', colour= user.color)  # build the embed and set the colour
    emb.add_field(name= 'Ник', value= user.display_name, inline= False)  # add and fill a field
    emb.add_field(name= 'ID', value= user.id, inline= False)  # add and fill a field
    if create_time == 0:  # check the day count
        emb.add_field(name= 'Присоединился к дискорду', value= f'{user.created_at.strftime("%d.%m.%Y")} ( Меньше дня )', inline= False)  # days on Discord
    else:
        emb.add_field(name= 'Присоединился к дискорду', value= f'{user.created_at.strftime("%d.%m.%Y")} ( {create_time} {postfix(create_time, "день", "дня", "дней")})', inline= False)  # days on Discord with the right plural ending
    if join_time == 0:  # check the day count
        emb.add_field(name= 'Присоединился к серверу', value= f'{user.joined_at.strftime("%d.%m.%Y")} ( Меньше дня )', inline= False)  # days on the server
    else:
        emb.add_field(name= 'Присоединился к серверу', value= f'{user.joined_at.strftime("%d.%m.%Y")} ( {join_time} {postfix(join_time, "день", "дня", "дней")} )', inline= False)  # days on the server with the right plural ending
    emb.add_field(name= 'Наивысшая роль', value= f"<@&{user.top_role.id}>", inline= False)  # add the top role
    emb.add_field(name= 'Статус', value= stat, inline= False)  # add the status
    emb.set_thumbnail(url= user.avatar_url)  # set the thumbnail picture
    await ctx.send(embed=emb)
owm = pyowm.OWM('9963f6627710292d5125e8200fc5b2b5', language= 'ru')
@Bot.command()
async def wea(ctx, *, arg):
    observation = owm.weather_at_place(arg)
    w = observation.get_weather()
    prs = w.get_pressure()
    tmp = w.get_temperature('celsius')
    hmd = w.get_humidity()
    cld = w.get_clouds()
    wnd = w.get_wind()
    wnds = wnd.get('speed')
    wnds_str = ''
    rn = w.get_rain()
    emb = discord.Embed(
        title= 'Текущая погода'
    )
    emb.add_field(
        name= 'Температура',
        value= f'{tmp.get("temp")}°'
    )
    emb.add_field(
        name= 'Давление',
        value= str(prs.get('press')) + 'мм рт.ст.'
    )
    emb.add_field(
        name= 'Влажность',
        value= str(hmd) + '%'
    )
    emb.add_field(
        name= 'Облачность',
        value= str(cld) + '%'
    )
    if wnds < 0.2: wnds_str = 'Штиль'
    elif wnds < 1.5: wnds_str = 'Тихий'
    elif wnds < 3.3: wnds_str = 'Лёгкий'
    elif wnds < 5.4: wnds_str = 'Слабый'
    elif wnds < 7.9: wnds_str = 'Умеренный'
    elif wnds < 10.7: wnds_str = 'Свежий'
    elif wnds < 13.8: wnds_str = 'Сильный'
    elif wnds < 17.1: wnds_str = 'Крепкий'
    elif wnds < 20.7: wnds_str = 'Очень крепкий'
    elif wnds < 24.4: wnds_str = 'Шторм'
    elif wnds < 28.4: wnds_str = 'Сильный шторм'
    elif wnds < 32.6: wnds_str = 'Жестокий шторм'
    else: wnds_str = 'Ураган'
    emb.add_field(
        name= 'Степень ветра',
        value= wnds_str
    )
    emb.add_field(
        name= 'Скорость ветра',
        value= str(wnds) + ' м/с'
    )
    emb.set_image(url= w.get_weather_icon_url())
    await ctx.send(embed=emb)
@Bot.command()
async def wiki(ctx, *, text):
    wikipedia.set_lang("ru")
    new_page = wikipedia.page(text)
    summ = wikipedia.summary(text)
    emb = discord.Embed(
        title= new_page.title,
        description= summ
    )
    emb.set_author(name= 'Больше информации тут! Кликай!', url= new_page.url, icon_url= 'https://upload.wikimedia.org/wikipedia/commons/thumb/8/80/Wikipedia-logo-v2.svg/1200px-Wikipedia-logo-v2.svg.png')
    await ctx.send(embed=emb)
@Bot.command()
async def serverinfo(ctx):
    members = ctx.guild.members
    online = len(list(filter(lambda x: x.status == discord.Status.online, members)))
    offline = len(list(filter(lambda x: x.status == discord.Status.offline, members)))
    idle = len(list(filter(lambda x: x.status == discord.Status.idle, members)))
    dnd = len(list(filter(lambda x: x.status == discord.Status.dnd, members)))
    allchannels = len(ctx.guild.channels)
    allvoice = len(ctx.guild.voice_channels)
    alltext = len(ctx.guild.text_channels)
    allroles = len(ctx.guild.roles)
    embed = discord.Embed(title=f"{ctx.guild.name}", color=0xff0000, timestamp=ctx.message.created_at)
    embed.description = (
        f":timer: Сервер создали **{ctx.guild.created_at.strftime('%A, %b %#d %Y')}**\n\n"
        f":flag_white: Регион **{ctx.guild.region}**\n\nГлава сервера **{ctx.guild.owner}**\n\n"
        f":tools: Ботов на сервере: **{len([m for m in members if m.bot])}**\n\n"
        f":green_circle: Онлайн: **{online}**\n\n"
        f":black_circle: Оффлайн: **{offline}**\n\n"
        f":yellow_circle: Отошли: **{idle}**\n\n"
        f":red_circle: Не трогать: **{dnd}**\n\n"
        f":shield: Уровень верификации: **{ctx.guild.verification_level}**\n\n"
        f":musical_keyboard: Всего каналов: **{allchannels}**\n\n"
        f":loud_sound: Голосовых каналов: **{allvoice}**\n\n"
        f":keyboard: Текстовых каналов: **{alltext}**\n\n"
        f":briefcase: Всего ролей: **{allroles}**\n\n"
        f":slight_smile: Людей на сервере **{ctx.guild.member_count}**\n\n"
    )
    embed.set_thumbnail(url=ctx.guild.icon_url)
    embed.set_footer(text=f"ID: {ctx.guild.id} | ID Пользователя: {ctx.author.id}")
    await ctx.send(embed=embed)
@Bot.command()
async def ball(ctx, *, arg):
    message = ['Нет', 'Да', 'Возможно', 'Определенно нет', 'Попробуй ещё раз', 'Даже не думай!', 'Никогда!']
    s = random.choice(message)
    await ctx.send(embed = discord.Embed(description = f'Кот кидает шар :pouting_cat: :right_facing_fist: :8ball: \n и шар говорит ** {s}**', color=0x0c0c0c))
    return
@Bot.command()
async def gl(ctx, *, question):
    url = 'https://google.gik-team.com/?q='
    emb = discord.Embed(title = question, description = 'Вот чего я должен все за тебя делать?',
                        colour = discord.Color.green(), url = url + str(question).replace(' ', '+'))
    await ctx.send(embed = emb)
@Bot.command()
async def cat(ctx):
    meow = random.randint(1, 100000)
    embed = discord.Embed(title='**Вот тебе котик**', colour=0x00ffff)
    embed.set_image(url = f'https://cataas.com/cat?{meow}')
    await ctx.send(embed=embed)
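# The random number appended to the cataas.com URL acts as a cache-buster, so
# Discord fetches a fresh cat image instead of reusing its cached one.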
@Bot.command()
async def covid(ctx, code: str= None):
    covid19 = COVID19Py.COVID19()
    if not code:
        location = covid19.getLocationByCountryCode("RU")[0]
    else:
        code = code.upper()
        with open('country_codes.json') as h_file:
            c_codes = json.load(h_file)
        if code not in c_codes:
            await ctx.send('Неверный код страны.')
            return
        location = covid19.getLocationByCountryCode(code)[0]
    date = location['last_updated'].split("T")
    time = date[1].split(".")
    embed = discord.Embed(
        title = f'Случаи заболевания COVID-19, {location["country"]}:',
        description = f'''Заболевших: {location['latest']['confirmed']}\nСмертей: {location['latest']['deaths']}\n\nНаселение: {location['country_population']}\nПоследние обновление: {date[0]} {time[0]}''',
        color=0x0c0c0c
    )
    await ctx.send(embed = embed)
class Messages:
    def __init__(self, Bot):
        self.Bot = Bot

    async def number_messages(self, member):
        n_messages = 0
        for guild in self.Bot.guilds:
            for channel in guild.text_channels:
                try:
                    async for message in channel.history(limit = None):
                        if message.author == member:
                            n_messages += 1
                except (discord.Forbidden, discord.HTTPException):
                    continue
        return n_messages

@Bot.command(name = "messages")
async def num_msg(ctx, member: discord.Member = None):
    user = ctx.message.author if (member == None) else member
    number = await Messages(Bot).number_messages(user)
    embed = discord.Embed(description = f":envelope: Количество сообщений на сервере от **{user.name}** — **{number}**!", color= 0x39d0d6)
    await ctx.send(embed = embed)
token = os.environ.get('BOT_TOKEN')
Bot.run(str(token))
|
Myrvek/CatCat
|
Cat.py
|
Cat.py
|
py
| 19,363 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "discord.ext.commands.Bot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot.remove_command",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot.event",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "Cybernator.Paginator",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot.change_presence",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "discord.Activity",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.ActivityType",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "asyncio.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.change_presence",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "discord.Game",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.event",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot.wait_for",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "asyncio.TimeoutError",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.user",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "nekos.img",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "nekos.img",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "nekos.img",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "nekos.img",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "nekos.img",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "pyowm.OWM",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "wikipedia.set_lang",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "wikipedia.page",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "wikipedia.summary",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "discord.Status",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "discord.Color.green",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "discord.Color",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "COVID19Py.COVID19",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "discord.Forbidden",
"line_number": 334,
"usage_type": "attribute"
},
{
"api_name": "discord.HTTPException",
"line_number": 334,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 341,
"usage_type": "argument"
},
{
"api_name": "discord.Embed",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot.command",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot.run",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 347,
"usage_type": "name"
}
] |
23230602110
|
import ui
from PIL import Image as ImageP
import io
import random
mainWindow = ui.View()
mainWindow.name = 'Image Conversion'
mainWindow.background_color = 'white'
mainWindow.width = 700 #ui.get_screen_size().width
mainWindow.height = 700 #ui.get_screen_size().height
def pil2ui(pil_img):
    with io.BytesIO() as buffer:
        pil_img.save(buffer, format='PNG')
        return ui.Image.from_data(buffer.getvalue())
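# pil2ui bridges PIL and Pythonista's ui module by round-tripping the image
# through an in-memory PNG buffer; the coin images below use it after resizing.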
path = "../Images/"
quarter = pil2ui(ImageP.open(path + "quarter.png").resize((70,70), ImageP.ANTIALIAS))
dime = pil2ui(ImageP.open(path + "dime.png").resize((50,50), ImageP.ANTIALIAS))
nickel = pil2ui(ImageP.open(path + "nickel.png").resize((60,60), ImageP.ANTIALIAS))
penny = pil2ui(ImageP.open(path + "penny.png").resize((55,55), ImageP.ANTIALIAS))
picture1 = ui.ImageView()
picture1.width = 70
picture1.height = 70
picture1.image = quarter
mainWindow.add_subview(picture1)
#mainWindow.present('fullscreen')
mainWindow.present('sheet')
|
WhittlinRich/python
|
ImageConversion.py
|
ImageConversion.py
|
py
| 955 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "ui.View",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ui.Image.from_data",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ui.Image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "ui.ImageView",
"line_number": 25,
"usage_type": "call"
}
] |
12465932204
|
from flask import Flask, flash, redirect,\
    url_for, session, g
from functools import wraps
from database import connect_db, query_db
from tigergrader.config import cfg
app = Flask(__name__)
app.config.update(cfg)
class GraderConfiguration():
    def __setitem__(self, k, v):
        g.db.execute('replace into configuration values (?, ?)', (k, v))
        g.db.commit()

    def __getitem__(self, k):
        v = query_db('select value from configuration where key == ?', [k])
        if v and "value" in v[0]:
            return v[0]["value"]

@app.before_request
def before_request():
    g.db = connect_db()

@app.teardown_request
def teardown_request(exception):
    if hasattr(g, 'db'):
        g.db.close()
def login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if "username" not in session:
            flash('You need to log in')
            return redirect(url_for('login'))
        elif session["username"] == app.config["ADMIN_USERNAME"]:
            return redirect(url_for('admin'))
        return f(*args, **kwargs)
    return decorated_function
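# A minimal usage sketch (hypothetical view function, not part of this module):
#   @app.route('/submissions')
#   @login_required
#   def submissions():
#       return 'visible to logged-in, non-admin users only'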
def get_active_modules():
    conf = GraderConfiguration()
    active_modules = conf["active_modules"]
    if active_modules:
        active_modules = active_modules.split(",")
    else:
        active_modules = []
    return active_modules
import tigergrader.admin
import tigergrader.student
|
pablooliveira/tigergrader
|
tigergrader/__init__.py
|
__init__.py
|
py
| 1,408 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tigergrader.config.cfg",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "flask.g.db.execute",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.g.db.commit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "database.query_db",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "database.connect_db",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "flask.g.db.close",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.g.db",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 37,
"usage_type": "call"
}
] |
72859056509
|
import os
import tempfile
from functools import partial
from jinja2 import Environment, FunctionLoader
from babel.dates import format_date, format_datetime
from babel.numbers import format_currency
try:
    import weasyprint
except ImportError:
    pass
from genshi.template import MarkupTemplate
from trytond.tools import file_open
from trytond.pool import Pool
from trytond.transaction import Transaction
from trytond.report import Report, TranslateFactory, Translator
from executor import execute
class ReportWebkit(Report):
    render_method = "webkit"

    @classmethod
    def render(cls, report, report_context):
        pool = Pool()
        Translation = pool.get('ir.translation')
        Company = pool.get('company.company')
        # Convert to str as buffer from DB is not supported by StringIO
        report_content = (str(report.report_content) if report.report_content
                          else False)
        if not report_content:
            raise Exception('Error', 'Missing report file!')
        translate = TranslateFactory(cls.__name__, Transaction().language,
                                     Translation)
        report_context['setLang'] = lambda language: translate.set_language(
            language)
        company_id = Transaction().context.get('company')
        report_context['company'] = Company(company_id)
        return cls.render_template(report_content, report_context, translate)
    @classmethod
    def convert(cls, report, data):
        # Convert the report to PDF if the output format is PDF
        # Do not convert when report is generated in tests, as it takes
        # time to convert to PDF due to which tests run longer.
        # Pool.test is True when running tests.
        output_format = report.extension or report.template_extension
        if output_format == "html" or Pool.test:
            return output_format, data
        elif cls.render_method == "webkit":
            return output_format, cls.wkhtml_to_pdf(data)
        elif cls.render_method == "weasyprint":
            return output_format, cls.weasyprint(data)
@classmethod
def render_template_genshi(cls, template_string, localcontext, translator):
"""
Legacy genshi rendered for backward compatibility. If your report is
still dependent on genshi, implement the method render_template in
your custom report and call this method with the same arguments and
return the value instead.
"""
report_template = MarkupTemplate(template_string)
# Since Genshi >= 0.6, Translator requires a function type
report_template.filters.insert(
0, Translator(lambda text: translator(text))
)
stream = report_template.generate(**localcontext)
return stream.render('xhtml').encode('utf-8')
@classmethod
def jinja_loader_func(cls, name):
"""
Return the template from the module directories using the logic
below:
The name is expected to be in the format:
<module_name>/path/to/template
for example, if the account_reports module had a base template in
its reports folder, then you should be able to use:
{% extends 'account_reports/report/base.html' %}
"""
module, path = name.split('/', 1)
try:
with file_open(os.path.join(module, path)) as f:
return f.read()
except IOError:
return None
@classmethod
def get_jinja_filters(cls):
"""
Returns filters that are made available in the template context.
By default, the following filters are available:
* dateformat: Formats a date using babel
* datetimeformat: Formats a datetime using babel
* currencyformat: Formats the given number as currency
* modulepath: Returns the absolute path of a file inside a
tryton-module (e.g. sale/sale.css)
For additional arguments that can be passed to these filters,
refer to the Babel `Documentation
<http://babel.edgewall.org/wiki/Documentation>`_.
"""
def module_path(name):
module, path = name.split('/', 1)
with file_open(os.path.join(module, path)) as f:
return 'file://' + f.name
return {
'dateformat': partial(format_date, locale=Transaction().language),
'datetimeformat': partial(
format_datetime, locale=Transaction().language
),
'currencyformat': partial(
format_currency, locale=Transaction().language
),
'modulepath': module_path
}
@classmethod
def get_environment(cls):
"""
Create and return a jinja environment to render templates
Downstream modules can override this method to easily make changes
to environment
"""
env = Environment(loader=FunctionLoader(cls.jinja_loader_func))
env.filters.update(cls.get_jinja_filters())
return env
@classmethod
def render_template(cls, template_string, localcontext, translator):
"""
Render the template using Jinja2
"""
env = cls.get_environment()
# Update header and footer in context
company = localcontext['company']
localcontext.update({
'header': env.from_string(company.header_html or ''),
'footer': env.from_string(company.footer_html or ''),
})
report_template = env.from_string(template_string.decode('utf-8'))
return report_template.render(**localcontext).encode('utf-8')
@classmethod
def weasyprint(cls, data, options=None):
return weasyprint.HTML(string=data).write_pdf()
@classmethod
def wkhtml_to_pdf(cls, data, options=None):
"""
Call wkhtmltopdf to convert the html to pdf
"""
with tempfile.NamedTemporaryFile(
suffix='.html', prefix='trytond_', delete=False
) as source_file:
file_name = source_file.name
source_file.write(data)
source_file.close()
# Evaluate argument to run with subprocess
args = 'wkhtmltopdf'
# Add Global Options
if options:
for option, value in options.items():
args += ' --%s' % option
if value:
args += ' "%s"' % value
# Add source file name and output file name
args += ' %s %s.pdf' % (file_name, file_name)
# Execute the command using executor
execute(args)
        return open(file_name + '.pdf', 'rb').read()  # PDF is binary; read in binary mode
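# Hedged usage sketch (the option name below is an illustrative wkhtmltopdf
# flag, not taken from this codebase): each key of `options` becomes a
# "--<option>" global flag, with its value appended in quotes when truthy.
#
#   pdf_bytes = ReportWebkit.wkhtml_to_pdf(html_bytes,
#                                          options={'orientation': 'Landscape'})
#   # builds: wkhtmltopdf --orientation "Landscape" \
#   #         /tmp/trytond_XXXX.html /tmp/trytond_XXXX.html.pdf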
|
openlabs/trytond-report-html
|
openlabs_report_webkit/__init__.py
|
__init__.py
|
py
| 6,758 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "trytond.report.Report",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "trytond.pool.Pool",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "trytond.report.TranslateFactory",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "trytond.pool.Pool.test",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "trytond.pool.Pool",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "genshi.template.MarkupTemplate",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "trytond.report.Translator",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "trytond.tools.file_open",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "trytond.tools.file_open",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "babel.dates.format_date",
"line_number": 126,
"usage_type": "argument"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "babel.dates.format_datetime",
"line_number": 128,
"usage_type": "argument"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "babel.numbers.format_currency",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "trytond.transaction.Transaction",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "jinja2.FunctionLoader",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "weasyprint.HTML",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "executor.execute",
"line_number": 192,
"usage_type": "call"
}
] |
27391305853
|
import uuid
from fastapi import status
from fastapi.testclient import TestClient
from httpx import Response
from internal.types import ScopedSession
from models.chat import Room
def test_POST_create_new_room_api_success_should_return_200(client: TestClient) -> None:
response: Response = client.post("/rooms", json={"name": "New room", "creator_id": None})
assert response.status_code == status.HTTP_200_OK
def test_GET_get_all_rooms_api_success_should_return_200(client: TestClient, db_session) -> None:
for i in range(10):
enc_key: str = uuid.uuid4().hex
room = Room(name=f"Room {i}", slug=f"room-{i}", encryption_key=enc_key)
db_session.add(room)
db_session.commit()
response: Response = client.get("/rooms")
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 10
def test_POST_join_existing_room_api_success_should_return_200(
client: TestClient, db_session: ScopedSession
) -> None:
room = Room(name="Room", slug="room", encryption_key=uuid.uuid4().hex)
db_session.add(room)
db_session.commit()
response: Response = client.post("/rooms/join", json={"name": room.name})
assert response.status_code == status.HTTP_200_OK
def test_POST_join_existing_room_api_failed_should_return_404(
client: TestClient, db_session: ScopedSession
) -> None:
response: Response = client.post("/rooms/join", json={"name": "Some name"})
assert response.status_code == status.HTTP_404_NOT_FOUND
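# Hedged sketch of the fixtures these tests rely on; `client` and
# `db_session` come from a conftest.py that is not shown, and the `app`
# import below is an assumption about the project layout:
#
#   import pytest
#   from fastapi.testclient import TestClient
#   from main import app  # hypothetical application module
#
#   @pytest.fixture
#   def client() -> TestClient:
#       return TestClient(app)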
|
ebysofyan/dcentric-health-hometest
|
chatroom-backend/tests/test_chat_room_api.py
|
test_chat_room_api.py
|
py
| 1,506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "httpx.Response",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.chat.Room",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "httpx.Response",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "internal.types.ScopedSession",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "models.chat.Room",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "httpx.Response",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "internal.types.ScopedSession",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "httpx.Response",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "fastapi.status.HTTP_404_NOT_FOUND",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 43,
"usage_type": "name"
}
] |
26798311383
|
import random
import cv2
import cvzone
import time
from cvzone.HandTrackingModule import HandDetector
cap = cv2.VideoCapture(0)
cap.set(3, 1080)
cap.set(4, 720)
detector=HandDetector(maxHands=1)
timer=0
stateResult=False
startGame=False
scores = [0, 0]
imgai = None  # AI's move image; assigned once per round when the timer expires
while True:
bg_main = cv2.imread('F:/rps/New folder/BG.png')
success, img = cap.read()
imgScaled = cv2.resize(img,(0,0),None,0.7222, 0.7222)
imgScaled = imgScaled[:, 3:515]
imgScaled= cv2.resize(imgScaled,(418,388))
#finds Hands
hands, img = detector.findHands(imgScaled)
if startGame:
if stateResult is False:
timer = time.time()- initialTime
cv2.putText(bg_main, str(int(timer)),(605,435),cv2.FONT_HERSHEY_PLAIN ,6,(255,0,255),4)
if timer>3:
stateResult = True
timer= 0
if hands:
playermove = None
hand = hands[0]
fingers = detector.fingersUp(hand)
if fingers == [0,0,0,0,0]:
playermove =1
if fingers == [1,1,1,1,1]:
playermove = 2
if fingers ==[0,1,1,0,0]:
playermove = 3
randomNumber = random.randint(1,3)
imgai= cv2.imread(f"F:/rps/New folder/data/{randomNumber}.png",cv2.IMREAD_UNCHANGED)
bg_main = cvzone.overlayPNG(bg_main, imgai ,(235,280))
# player Win
if (playermove == 1 and randomNumber == 3) or\
(playermove == 2 and randomNumber == 1) or\
(playermove == 3 and randomNumber == 2):
scores[1] +=1
# ai Win
if (playermove == 3 and randomNumber == 1) or\
(playermove == 1 and randomNumber == 2) or\
(playermove == 2 and randomNumber == 3):
scores[0] +=1
bg_main[114:502,756:1174]= imgScaled
print(imgScaled.shape)
print(bg_main[114:502, 756:1174].shape)
    if stateResult and imgai is not None:  # imgai is set only when a hand was detected
        bg_main = cvzone.overlayPNG(bg_main, imgai, (235, 280))
cv2.putText(bg_main, str(scores[0]),(410,215),cv2.FONT_HERSHEY_PLAIN ,4,(255,255,255),6)
cv2.putText(bg_main, str(scores[1]),(1112,215),cv2.FONT_HERSHEY_PLAIN ,4,(255,255,255),6)
#cv2.imshow("image" , img)
cv2.imshow("BG" ,bg_main)
#cv2.imshow("scaled", imgScaled)
key=cv2.waitKey(1)
if key == ord('s'):
startGame = True
initialTime = time.time()
stateResult = False
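# Move encoding, derived from the fingersUp patterns above:
#   1 = rock     (fist:            [0,0,0,0,0])
#   2 = paper    (open hand:       [1,1,1,1,1])
#   3 = scissors (index + middle:  [0,1,1,0,0])
# scores[0] is the AI's score, scores[1] the player's.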
|
123dhanya/final-project
|
you.py
|
you.py
|
py
| 2,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cvzone.HandTrackingModule.HandDetector",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "cvzone.overlayPNG",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cvzone.overlayPNG",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 97,
"usage_type": "call"
}
] |
37061565332
|
import os
import sys
import torch
import torchvision.models
from torch import optim
from torch import nn
from tqdm import tqdm
from collections import Counter
from easydict import EasyDict as edict
from tools import CustomDataSet, Averagemeter, Speedometer, print_one_line, model_size_mb, ROCEstimator
from torch.utils.tensorboard import SummaryWriter
from torch.cuda import amp
import resnet
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# --------- CONFIG -----------
os.environ['TORCH_HOME'] = './weights'
cfg = edict()
cfg.train_in_fp16 = True
cfg.crop_size = (256, 256)
cfg.batch_size = 32
cfg.grad_accum_batches = 4
cfg.num_epochs = 32
cfg.num_classes = 2
cfg.augment = True
cfg.model_name = "effnet_v2_s"
cfg.pretrained = True
cfg.labels_smoothing = 0.05
cfg.max_batches_per_train_epoch = -1 # -1 - all
crop_format = '256x60x0.1' if cfg.crop_size[0] == 256 else '224x90x0.2'
local_path = f"/home/alex/Fastdata/deepfakes/sequence/{crop_format}"
for key in cfg:
print(f" - {key}: {cfg[key]}")
# ---------- DNN --------------
if cfg.model_name == "effnet_v2_s":
model = torchvision.models.efficientnet_v2_s()
if cfg.pretrained:
model.load_state_dict(torchvision.models.efficientnet_v2_s(
weights=torchvision.models.EfficientNet_V2_S_Weights.IMAGENET1K_V1).state_dict())
model.classifier[1] = nn.Linear(in_features=1280, out_features=cfg.num_classes, bias=True)
max_lr = 0.001
elif cfg.model_name == "resnet18-ir":
model = resnet.ResNetFaceGray(block=resnet.IRBlock, layers=[2, 2, 2, 2], use_se=True, attention=False,
output_features=cfg.num_classes)
max_lr = 0.01
model = model.to(device)
# model = nn.DataParallel(model)
print(f" - model size: {model_size_mb(model):.3f} MB")
# Loss and optimizer
loss_fn = nn.CrossEntropyLoss(label_smoothing=cfg.labels_smoothing)
optimizer = optim.Adam(model.parameters(), lr=max_lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=2, factor=0.2, min_lr=0.0001,
verbose=True)
# ----------------------------
writer = SummaryWriter(filename_suffix=cfg.model_name)
print("Train dataset:")
train_dataset = CustomDataSet([
f"{local_path}/roop",
f"{local_path}/FaceForensics++",
f"{local_path}/Celeb-DF-v2",
f"{local_path}/dfdc",
], cfg.crop_size, do_aug=cfg.augment)
print(f" {train_dataset.labels_names()}")
assert cfg.num_classes == len(train_dataset.labels_names())
lbls_count = dict(Counter(train_dataset.targets))
print(f" {lbls_count}")
class_weights = list(1 / torch.Tensor(list(lbls_count.values())))
samples_weights = [class_weights[lbl] for lbl in train_dataset.targets]
sampler = torch.utils.data.WeightedRandomSampler(weights=samples_weights, num_samples=len(train_dataset),
replacement=True)
train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=sampler, batch_size=cfg.batch_size, num_workers=8)
alive_lbl = None
for key in train_dataset.labels_names():
if train_dataset.labels_names()[key] == 'live':
alive_lbl = key
break
print("Test dataset:")
test_dataset = CustomDataSet([
f"{local_path}/toloka"
], cfg.crop_size, do_aug=False)
print(f" {test_dataset.labels_names()}")
print(f" {dict(Counter(test_dataset.targets))}")
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=6)
metrics = {
    'train': {'EER': float('inf'), 'loss': float('inf'), 'BPCER@0.1': float('inf')},
    'test': {'EER': float('inf'), 'loss': float('inf'), 'BPCER@0.1': float('inf')}
}
def update_metrics(mode, epoch, running_loss, roc_estimator):
if not os.path.exists('./weights'):
os.makedirs('./weights')
writer.add_scalar(f"Loss/{mode}", running_loss, epoch)
if running_loss < metrics[mode]['loss']:
metrics[mode]['loss'] = running_loss
print(f" - loss: {running_loss:.5f} - improvement")
else:
print(f" - loss: {running_loss:.5f}")
eer, err_s = roc_estimator.estimate_eer()
writer.add_scalar(f"EER/{mode}", eer, epoch)
if eer < metrics[mode]['EER']:
metrics[mode]['EER'] = eer
if mode == 'test':
torch.save(model, f"./weights/tmp_{cfg.model_name}@{crop_format}.pth")
print(f" - EER: {eer:.4f} (score: {err_s:.3f}) - improvement")
else:
print(f" - EER: {eer:.4f} (score: {err_s:.3f})")
print(f" - [email protected]: {roc_estimator.estimate_bpcer(target_apcer=0.1):.4f}")
bpcer01 = roc_estimator.estimate_bpcer(target_apcer=0.01)
print(f" - [email protected]: {bpcer01:.4f}")
writer.add_scalar(f"[email protected]/{mode}", bpcer01, epoch)
print(f" - [email protected]: {roc_estimator.estimate_bpcer(target_apcer=0.001):.4f}")
loss_avgm = Averagemeter()
ae_avgm = Averagemeter()
speedometer = Speedometer()
scaler = amp.grad_scaler.GradScaler()
train_roc_est = ROCEstimator()
test_roc_est = ROCEstimator()
def train(epoch, dataloader):
print("TRAIN:")
train_roc_est.reset()
loss_avgm.reset()
ae_avgm.reset()
speedometer.reset()
model.train()
running_loss = 0
true_positive_live = 0
false_positive_live = 0
true_negative_live = 0
false_negative_live = 0
samples_enrolled = 0
for batch_idx, (inputs, labels) in enumerate(dataloader):
if batch_idx == cfg.max_batches_per_train_epoch:
break
inputs = inputs.to(device)
labels = labels.to(device)
#u,c = torch.unique(labels, return_counts=True)
#print(u,c)
if cfg.train_in_fp16:
with amp.autocast():
outputs = model(inputs)
loss = loss_fn(outputs, labels)
loss = loss / cfg.grad_accum_batches
scaler.scale(loss).backward()
if (batch_idx + 1) % cfg.grad_accum_batches == 0 or batch_idx == (len(dataloader) - 1):
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
outputs = model(inputs)
loss = loss_fn(outputs, labels)
loss = loss / cfg.grad_accum_batches
loss.backward()
if (batch_idx + 1) % cfg.grad_accum_batches == 0 or batch_idx == (len(dataloader) - 1):
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
optimizer.zero_grad()
with torch.no_grad():
scores = torch.nn.functional.softmax(outputs, dim=1)
train_roc_est.update(live_scores=scores[labels == alive_lbl, alive_lbl].tolist(),
attack_scores=scores[labels != alive_lbl, alive_lbl].tolist())
running_loss += loss.item()
_, predicted = outputs.max(1)
_tp_live = (predicted[labels == alive_lbl] == alive_lbl).sum().item()
true_positive_live += _tp_live
_fp_live = (predicted[labels != alive_lbl] == alive_lbl).sum().item()
false_positive_live += _fp_live
_tn_live = (predicted[labels != alive_lbl] != alive_lbl).sum().item()
true_negative_live += _tn_live
_fn_live = (predicted[labels == alive_lbl] != alive_lbl).sum().item()
false_negative_live += _fn_live
loss_avgm.update(loss.item())
ae_avgm.update((_fp_live / (_fp_live + _tn_live + 1E-6) + _fn_live / (_fn_live + _tp_live + 1E-6)) / 2)
samples_enrolled += labels.size(0)
speedometer.update(labels.size(0))
print_one_line(
f'Epoch {epoch} >> loss {loss_avgm.val:.3f}, AE {ae_avgm.val * 100:.2f}%, '
f'{samples_enrolled}/{len(train_dataset)} ~ '
f'{100 * samples_enrolled / len(train_dataset):.1f} % | '
f'{speedometer.speed():.0f} samples / s '
)
    update_metrics('train', epoch, running_loss / max(1, batch_idx), train_roc_est)
def test(epoch, dataloader):
print("TEST:")
test_roc_est.reset()
model.eval()
running_loss = 0
with torch.no_grad():
for batch_idx, (inputs, labels) in enumerate(tqdm(dataloader, file=sys.stdout)):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = loss_fn(outputs, labels)
running_loss += loss.item()
_, predicted = outputs.max(1)
scores = torch.nn.functional.softmax(outputs, dim=1)
test_roc_est.update(live_scores=scores[labels == alive_lbl, alive_lbl].tolist(),
attack_scores=scores[labels != alive_lbl, alive_lbl].tolist())
update_metrics('test', epoch, running_loss / len(dataloader), test_roc_est)
scheduler.step(metrics['test']['EER'])
print("\n")
for epoch in range(cfg.num_epochs):
train(epoch, train_dataloader)
test(epoch, test_dataloader)
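# Hedged sketch of the ROCEstimator contract as exercised in this file
# (tools.ROCEstimator is project-local; only the calls used above are
# assumed to exist):
#
#   est = ROCEstimator()
#   est.reset()
#   est.update(live_scores=[0.92, 0.85], attack_scores=[0.10, 0.35])
#   eer, score_at_eer = est.estimate_eer()
#   bpcer = est.estimate_bpcer(target_apcer=0.01)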
|
pi-null-mezon/antideepfakes
|
singleshot/train.py
|
train.py
|
py
| 9,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "easydict.EasyDict",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torchvision.models.models.efficientnet_v2_s",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torchvision.models.models",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torchvision.models.models.efficientnet_v2_s",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvision.models.models",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torchvision.models.models",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "resnet.ResNetFaceGray",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "resnet.IRBlock",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "tools.model_size_mb",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tools.CustomDataSet",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.WeightedRandomSampler",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "tools.CustomDataSet",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tools.Averagemeter",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tools.Averagemeter",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tools.Speedometer",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.grad_scaler.GradScaler",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.grad_scaler",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.amp",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "tools.ROCEstimator",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tools.ROCEstimator",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp.autocast",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.cuda.amp",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "tools.print_one_line",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 219,
"usage_type": "attribute"
}
] |
71261723068
|
# -*- coding: utf-8 -*-
__all__ = ('random_policy', 'stdin_policy', 'imitation_policy', 'inference_policy')
from utils.distribution import sample_by_mask
from utils.match_data import MatchData
from utils.tile_traits import str2tile, tile2str
from torch import nn
import numpy as np
class random_policy:
def __init__(self):
pass
def __call__(self, obs):
return sample_by_mask(obs[1])
class stdin_policy:
def __init__(self, env):
self.env = env
def __call__(self, obs):
assert self.env.verbose, "To use stdin policy, you must set env to be verbose."
self.env.render()
if obs[1].astype(np.uint8).sum() == 1:
return obs[1].nonzero()[0]
return input('> ')
class inference_policy:
def __init__(self, match_lines):
self.inferred = [[], [], [], []]
self.match_lines = match_lines[6:-2]
self.idx = 0
self.match_id = match_lines[0].split()[-1]
self.quan = int(match_lines[1].split()[-1])
self.tile_wall = [[], [], [], []]
self.fan_info = match_lines[-2]
self.scores = list(map(int, match_lines[-1].split()[1:]))
self.translate_fn = None
self.last = None
self.count = 0
for i in range(4):
self.tile_wall[i] = match_lines[2+i].split()[3:]
no_ignored = []
for idx, line in enumerate(self.match_lines):
lst = line.split()
if lst[2] == "Draw":
self.tile_wall[int(lst[1])].append(lst[3])
elif lst[2] == "Chi":
lst[2] = "rChi"
if 'Ignore' in lst:
subset = [' '.join(lst[:lst.index('Ignore')])]
while 'Ignore' in lst:
j = lst.index('Ignore')
subset.append(' '.join(lst[j+1:j+4] + [lst[3]]))
lst = lst[:j] + lst[j+4:]
subset.sort(key=lambda x:int(x[len('Player x') - 1]))
no_ignored += subset
else:
no_ignored.append(' '.join(lst))
self.match_lines = no_ignored + ['PLAY 4 PASS PASS']
for i in range(4):
self.tile_wall[i] = self.tile_wall[i] + ['??'] * (34 - len(self.tile_wall[i]))
assert sum([len(self.tile_wall[i]) == 34 for i in range(4)]) == 4
def __call__(self, obs):
players = obs[2].reshape(-1)
mask = obs[1].reshape(players.shape[0], -1)
self.count += (mask.sum(1) > 1).sum() * 4 * 9 * 145 / 1024 / 1024
lst = self.match_lines[self.idx].split()
while lst[2] == 'Draw':
self.last = None
self.idx += 1
lst = self.match_lines[self.idx].split()
next_player = int(lst[1])
next_last_tile = lst[3] if lst[2] == 'Play' else None
for idx, player in enumerate(players):
if player == next_player:
action = self.valid_to_do(mask[idx], lst[2], lst[3])
if action is not None:
self.inferred[player].append(action)
self.idx += 1
lst = self.match_lines[self.idx].split()
next_player = int(lst[1])
continue
self.inferred[player].append(self.translate_fn('PASS'))
if next_last_tile:
self.last = next_last_tile
if players.shape[0] == 1:
return self.inferred[players[0]][-1]
return np.asarray([self.inferred[player][-1] for player in players])
def valid_to_do(self, mask, action, tile):
action = action.upper()
if action in ['DRAW', 'PASS']:
return None
if action == 'CHI':
if tile[1] <= '7':
try_action = self.translate_fn(action + ' ' + tile[0] + chr(ord(tile[1]) + 1), self.last)
if mask[try_action]:
return try_action
if '2' <= tile[1] <= '8':
try_action = self.translate_fn(action + ' ' + tile, self.last)
if mask[try_action]:
return try_action
if '3' <= tile[1]:
try_action = self.translate_fn(action + ' ' + tile[0] + chr(ord(tile[1]) - 1), self.last)
if mask[try_action]:
return try_action
return None
if action == 'RCHI':
action = 'CHI'
if action in ['CHI', 'BUGANG', 'PLAY']:
action += ' ' + tile
elif action == 'GANG' and self.last is None:
action += ' ' + tile
try_action = self.translate_fn(action, self.last)
if not mask[try_action]:
return None
return try_action
def as_match_data(self, id=0):
tile_wall = sum(map(lambda x:list(reversed(x)), self.tile_wall), [])
tile_wall = np.asarray([34 if tile == '??' else str2tile[tile] for tile in tile_wall], dtype=np.uint8)
return MatchData(
match_id=self.match_id,
id=id,
quan=self.quan,
tile_wall=tile_wall,
actions=list(map(lambda x:np.asarray(x, dtype=np.uint8), self.inferred)),
final_info=self.fan_info,
scores=np.asarray(self.scores, dtype=np.int32)
)
class imitation_policy:
def __init__(self, match):
self.actions = np.zeros((4, max([match.actions[i].shape[0] for i in range(4)])), dtype=np.uint8)
for i in range(4):
self.actions[i, :match.actions[i].shape[0]] = match.actions[i]
self.idxs = np.asarray([0] * 4)
def __call__(self, obs):
action = self.actions[obs[2], self.idxs[obs[2]]]
self.idxs[obs[2]] += 1
return action
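# Hedged usage sketch: random_policy consumes only obs[1], a binary action
# mask, so a minimal call looks like this (the mask length and the other
# tuple slots are assumptions):
#
#   import numpy as np
#   mask = np.zeros(235, dtype=np.uint8)
#   mask[[3, 17, 42]] = 1
#   action = random_policy()((None, mask, None))  # index of a set bit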
|
illusive-chase/ChineseStandardMahjong
|
utils/policy.py
|
policy.py
|
py
| 5,871 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "utils.distribution.sample_by_mask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "utils.tile_traits.str2tile",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "utils.match_data.MatchData",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 144,
"usage_type": "call"
}
] |
10343753605
|
# -*- coding: utf-8 -*-
"""
Chapter 02 script 02
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
# should see 2.0.0 or higher
print(keras.__version__)
# should see 2.2.4-tf (the bundled tf.keras version)
# the four different states of the XOR gate
training_data = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
# the four expected results in the same order
target_data = np.array([[0],[1],[1],[0]], "float32")
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[2]))
model.add(keras.layers.Dense(16, activation = 'sigmoid'))
model.add(keras.layers.Dense(16, activation = 'sigmoid'))
model.add(keras.layers.Dense(1, activation = 'sigmoid'))
model.summary()
model.layers
hidden1 = model.layers[1]
hidden1.name
model.get_layer('dense') is hidden1
weights, biases = hidden1.get_weights()
print(weights)
print(weights.shape)
weights.shape
biases
biases.shape
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['binary_accuracy'])
history = model.fit(training_data, target_data, epochs=750, verbose=2)
import pandas as pd
import matplotlib.pyplot as plt
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
print(model.predict(training_data).round())
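# For a converged run the rounded predictions reproduce the XOR truth table:
#   [[0.]
#    [1.]
#    [1.]
#    [0.]]
# (750 epochs is usually, though not always, enough for this 2-16-16-1
# sigmoid network to separate the four states)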
|
sgupta117/ML-Material-Krish_Naik-
|
Natural-Lanuage-Processing-with-TensorFlow2-master/Chapter_02/chapter_02_03.py
|
chapter_02_03.py
|
py
| 1,339 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "tensorflow.__version__",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.__version__",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
}
] |
4947454827
|
import uuid
import datetime
import json
from oscal_pydantic import (
catalog,
assessment_plan,
assessment_results,
complete,
component,
poam,
profile,
ssp,
)
cg_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, 'cloud.gov')
ssp_au_01 = ssp.SpecificControlStatement(
    statement_id="au_01_smt",
    uuid=str(uuid.uuid5(cg_uuid, 'ssp_au_01')),
    remarks="This is a remark"
)
print(ssp_au_01.json())
|
pburkholder/oscal-pydantic
|
ssp.py
|
ssp.py
|
py
| 427 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "uuid.uuid5",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "uuid.NAMESPACE_DNS",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "oscal_pydantic.ssp.SpecificControlStatement",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "oscal_pydantic.ssp",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "uuid.uuid5",
"line_number": 20,
"usage_type": "call"
}
] |
74523936828
|
#!/usr/bin/python3
"""
script that takes your GitHub credentials
"""
import requests
from sys import argv
def getgithub():
url = "https://api.github.com/user"
response = requests.get(url, auth=(argv[1], argv[2]))
response = response.json()
print(response.get("id"))
if __name__ == "__main__":
getgithub()
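# Usage: credentials are passed as positional arguments, e.g.
#   ./10-my_github.py <username> <personal_access_token>
# Prints the authenticated user's GitHub id, or None when authentication
# fails (the error JSON has no "id" key).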
|
BrunoGuerraS/holbertonschool-higher_level_programming
|
0x11-python-network_1/10-my_github.py
|
10-my_github.py
|
py
| 332 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "name"
}
] |
40546855652
|
# chat/consumers.py
import base64
import random
from asgiref.sync import async_to_sync
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.db import database_sync_to_async
import json
import time
from face_machine_client.models import PassInfo, ClientInfo, SyncInfo
from face_irobot_main.models import FaceStore
import string
def create_client_id(randomlength=15):
    """
    Generate a random client_id string
    """
    random_str = ''
    # full alphanumeric alphabet
    base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    length = len(base_str) - 1
    for i in range(randomlength):
        random_str += base_str[random.randint(0, length)]
    return random_str
class FaceWebsocket(AsyncWebsocketConsumer):
"""
    Face-recognition backend websocket service
"""
def __init__(self, *args, **kwargs):
        # Initialize instance attributes
super().__init__(args, kwargs)
self.client_user = None
self.client_id = None
self.company_id = None
async def connect(self):
        # Accept the incoming connection
await self.accept()
        # Generate a client_id for this connection
self.client_id = create_client_id()
await self.send(text_data=json.dumps(
{
'type': 'init',
'message': 'connected',
'error_code': 0,
                'client_id': self.client_id  # echo the client_id so the device can verify this connection
}))
async def disconnect(self, close_code):
await self.send(text_data=json.dumps(
{
'message': 'disconnect'
}))
async def receive(self, text_data=None, bytes_data=None):
        # Handle incoming websocket messages
text_data_json = json.loads(text_data)
message_type = text_data_json.get('type')
        # Authentication flow
if message_type == "authenticate":
            # Device authentication
client_user = text_data_json.get('client_user')
client_key = text_data_json.get('client_key')
            # Verify that this message belongs to the current connection
if text_data_json.get('client_id') == self.client_id:
                # Run the database query off the event loop
results = await database_sync_to_async(auth_client_token)(client_user, client_key)
                # Login succeeded
if results[0] == 'ok':
self.company_id = results[2]
self.client_user = client_user
await self.send(text_data=json.dumps(
{
                            # Return login state and device parameters
'message': 'auth_success',
'client_token': results[1],
'company_id': results[2]
}))
                    # Join the company-wide group channel, used to push parameters to all devices
await self.channel_layer.group_add(
str(self.company_id),
self.channel_name
)
                    # Join the device's own channel, used for point-to-point pushes
await self.channel_layer.group_add(
str(client_user),
self.channel_name
)
                # Login failed
else:
await self.send(text_data=json.dumps(
{
'message': 'auth_fail'
}))
await self.close()
else:
await self.send(text_data=json.dumps(
{
'message': 'client_id_error'
}))
await self.close()
        # Pass-through (access) log
elif message_type == "pass_info":
if self.company_id:
info_detail = text_data_json.get("info_detail")
task_id = text_data_json.get("task_id")
update_results = await database_sync_to_async(update_pass_info)(info_detail_list=info_detail,
task_id=task_id,
client_user=self.client_user
)
await self.send(text_data=json.dumps(update_results))
else:
await self.send(text_data=json.dumps(
{
'message': 'need_auth'
}))
        # Synchronize the face database
elif message_type == "sync_database":
if self.company_id:
data_sync_type = text_data_json.get('sync_type')
                # Full-sync mode
if data_sync_type == "full_sync":
resp_data = await database_sync_to_async(get_face_store)(client_user=self.client_user,
data_sync_type=data_sync_type,
company_id=self.company_id)
await self.send(text_data=json.dumps(resp_data))
                # Check-sync mode: the device sends its local staff list so row counts can be compared
elif data_sync_type == "check_sync":
check_sync_list = text_data_json.get("check_sync_list")
resp_data = await database_sync_to_async(get_face_store)(client_user=self.client_user,
data_sync_type=data_sync_type,
company_id=self.company_id,
check_sync_list=check_sync_list)
await self.send(text_data=json.dumps(resp_data))
                # Confirm the comparison result
elif data_sync_type == "check_finished":
check_finished_list = text_data_json.get("check_finished_list")
resp_data = await database_sync_to_async(get_face_store)(client_user=self.client_user,
data_sync_type=data_sync_type,
company_id=self.company_id,
check_sync_list=check_finished_list)
await self.send(text_data=json.dumps(resp_data))
                # Sync specific staff members
elif data_sync_type == "single_sync":
staff_id_list = text_data_json.get('staff_id_list')
resp_data = await database_sync_to_async(get_face_store)(data_sync_type=data_sync_type,
staff_id_list=staff_id_list)
await self.send(text_data=json.dumps(resp_data))
else:
await self.send(text_data=json.dumps(
{
'message': 'need_auth'
}))
        # Sync status report
elif message_type == "sync_info":
if self.company_id:
sync_state = text_data_json.get('sync_state')
state = sync_state.get("state")
face_data_len = sync_state.get("face_data_len")
success_sync = sync_state.get("success_sync")
                # Determine the sync type
if sync_state['type'] == "full_sync":
sync_type = "full_sync"
elif sync_state['type'] == 'single_sync':
sync_type = "single"
elif sync_state['type'] == 'check_sync':
sync_type = 'check_sync'
face_data_len = 0
success_sync = 0
else:
sync_type = ''
                # Persist the sync state to the database
await database_sync_to_async(update_sync_state)(sync_type=sync_type,
sync_state=state,
client_user=self.client_user,
face_data_len=face_data_len,
success_sync=success_sync)
else:
await self.send(text_data=json.dumps(
{
'message': 'need_auth'
}))
    # Push a notification down to the device
async def get_notification(self, event):
message = event['message']
        # Send asynchronously
await self.send(text_data=json.dumps({
'message': message
}))
def auth_client_token(client_user, client_key):
"""
    Device authentication
"""
    # Look up the device id and key in the database
auth = ClientInfo.objects.filter(client_user=client_user, client_key=client_key)
# print('auth', auth)
auth_state = auth.count()
# print('auth_state', auth_state)
if auth_state >= 1:
        # Generate a unique token for the device
auth.update(client_token=create_token())
return 'ok', auth.get().client_token, auth.get().client_info.company_id
else:
return 'error', None
def create_token():
"""
    Generate a token
"""
token = 'time=' + str(int(time.time())) + 'num=' + str(random.randint(1000, 9999))
token = base64.b64encode(token.encode('utf-8')).decode('utf-8')
print('token', token)
return token
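# Hedged round-trip example (the timestamp value is illustrative): the token
# is plain base64, so it can be decoded for debugging:
#   base64.b64decode(token).decode('utf-8')  # -> "time=1700000000num=1234"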
def get_face_store(client_user, data_sync_type, company_id=None, staff_id_list=None, check_sync_list=None):
"""
    Fetch the face database
"""
if check_sync_list is None:
check_sync_list = []
resp_data = {
'type': data_sync_type,
'face_store': []
}
    # Full sync
if data_sync_type == "full_sync":
full_store = FaceStore.objects.filter(company_id=company_id)
for single_face in full_store:
single_data = {'name': single_face.name,
'phone': single_face.phone,
'staff_id': single_face.staff_id,
'company_id': single_face.company_id,
'face_code': str(single_face.face_code)}
resp_data['face_store'].append(single_data)
print(len(resp_data['face_store']))
return resp_data
    # Sync specific staff members
elif data_sync_type == "single_sync":
for single_staff in staff_id_list:
single_face = FaceStore.objects.filter(staff_id=single_staff).get()
print('single_face', single_face)
single_data = {'name': single_face.name,
'phone': single_face.phone,
'staff_id': single_face.staff_id,
'company_id': single_face.company_id,
'face_code': str(single_face.face_code)}
resp_data['face_store'].append(single_data)
print(len(resp_data['face_store']))
return resp_data
    # Check-sync against the device's staff list
elif data_sync_type == "check_sync":
local_staff_list = []
for x in FaceStore.objects.filter(company_id=company_id):
local_staff_list.append(x.staff_id)
print('local_staff_list', local_staff_list)
if check_sync_list:
check_sync_list = check_sync_list
else:
check_sync_list = []
update_list = []
for check_staff in local_staff_list:
if check_staff not in check_sync_list:
update_list.append(check_staff)
        # Detect database changes (face updates, face deletions)
for k in FaceStore.objects.filter(company_id=company_id):
local_client_list = eval(k.client_sync_list)
print('local_client_list', local_client_list)
if client_user in local_client_list:
pass
else:
update_list.append(k.staff_id)
print('update_list', update_list)
resp_data['update_list'] = update_list
return resp_data
    # Confirm the comparison result
elif data_sync_type == "check_finished":
try:
for single_staff in check_sync_list:
local_client_list = FaceStore.objects.filter(staff_id=single_staff).get().client_sync_list
new_local_client_list = eval(local_client_list)
# print('new_local_client_list', new_local_client_list)
new_local_client_list.append(client_user)
# print('new_local_client_list_return', new_local_client_list)
                # De-duplicate device ids
new_local_client_list = list(set(new_local_client_list))
FaceStore.objects.filter(staff_id=single_staff).update(client_sync_list=str(new_local_client_list))
except Exception as e:
print(e)
else:
resp_data['update_state'] = "cool"
return resp_data
def update_sync_state(sync_type, sync_state, client_user, face_data_len=0, success_sync=0):
"""
    Update the sync state
"""
try:
        # Full sync
if sync_type == "full_sync":
SyncInfo.objects.create(sync_type=sync_type,
sync_state=sync_state,
client_user_id=client_user,
face_data_len=face_data_len,
success_sync=success_sync)
        # Check-sync
elif sync_type == "check_sync":
SyncInfo.objects.create(sync_type=sync_type,
sync_state=sync_state,
client_user_id=client_user)
except Exception as e:
print(e)
return "sync_info_error"
else:
return 'ok'
def update_pass_info(info_detail_list, task_id, client_user):
"""
    Update the access (pass-through) log
"""
resp_data = {
'task_id': task_id,
'pass_update_state': ''
}
try:
        # Insert records into the database one by one
for single_info in info_detail_list:
name = single_info.get('name')
staff_id = single_info.get('staff_id')
company_id = single_info.get('company_id')
pass_time = single_info.get('pass_time')
PassInfo.objects.create(name=name,
staff_id=staff_id,
company_id=company_id,
pass_time=pass_time,
client_user=client_user)
except Exception as e:
print(e)
resp_data['pass_update_state'] = 'error'
return resp_data
else:
resp_data['pass_update_state'] = 'success'
return resp_data
|
hamster1963/face-all-in-one-machine-backend
|
face_machine_client/consumers.py
|
consumers.py
|
py
| 14,986 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "channels.generic.websocket.AsyncWebsocketConsumer",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "channels.db.database_sync_to_async",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "face_machine_client.models.ClientInfo.objects.filter",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "face_machine_client.models.ClientInfo.objects",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "face_machine_client.models.ClientInfo",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects.filter",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "face_irobot_main.models.FaceStore",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects.filter",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "face_irobot_main.models.FaceStore",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects.filter",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "face_irobot_main.models.FaceStore",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects.filter",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "face_irobot_main.models.FaceStore",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects.filter",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects",
"line_number": 298,
"usage_type": "attribute"
},
{
"api_name": "face_irobot_main.models.FaceStore",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects.filter",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "face_irobot_main.models.FaceStore.objects",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "face_irobot_main.models.FaceStore",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "face_machine_client.models.SyncInfo.objects.create",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "face_machine_client.models.SyncInfo.objects",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "face_machine_client.models.SyncInfo",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "face_machine_client.models.SyncInfo.objects.create",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "face_machine_client.models.SyncInfo.objects",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "face_machine_client.models.SyncInfo",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "face_machine_client.models.PassInfo.objects.create",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "face_machine_client.models.PassInfo.objects",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "face_machine_client.models.PassInfo",
"line_number": 352,
"usage_type": "name"
}
] |
27595046406
|
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (
QHBoxLayout,
QListWidgetItem,
QCheckBox,
QGridLayout,
QWidget,
QComboBox,
QListWidget,
)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib
import matplotlib.pyplot as plt
from code_SRC.composantes import Time
from datetime import date
class Save_plot_app(QWidget):
"""Plot app that shows a plot chart of a specific leaderboard.
#####################################################
# # #
# # #
# # #
# # #
# # Plot #
# DATA # CHART #
# LIST # #
# # #
# # #
# # #
# # #
#####################################################
"""
def __init__(self, data):
def list_widget() -> QListWidget:
self.listwidget = QListWidget()
self.listwidget.setFixedWidth(450)
for rank, entry in enumerate(runs_times, start=1):
delta = Time(entry - runs_times[0])
string = f"{rank:4} {Time(entry)} {str(delta):>10} (-{(runs_times[0] - entry) / runs_times[0]:>6.2%})"
one_line = QListWidgetItem(string)
one_line.setFont(QFont("Lucida Sans Typewriter", 10))
self.listwidget.addItem(one_line)
# self.listwidget.clicked.connect(self.list_clicked)
return self.listwidget
def plot_widget() -> FigureCanvas:
self.canvas = FigureCanvas(plt.Figure(tight_layout=True))
matplotlib.rc("font", **{"weight": "normal", "size": 16})
self.update_plot()
return self.canvas
runs_times, pb, WR_time = data
super().__init__()
self.data = runs_times
self.pb = pb.seconds
self.WR = WR_time
self.setMinimumSize(1400, 600)
self.layout = QGridLayout()
self.setLayout(self.layout)
self.layout.addWidget(list_widget(), 0, 0)
self.layout.addWidget(plot_widget(), 0, 1)
def plot(self):
self.canvas.figure.clf()
self.ax = self.canvas.figure.subplots()
self.ax.plot([x for x in range(1, len(self.data) + 1)], self.data)
self.ax.axhline(self.WR, color="goldenrod")
self.ax.set_yticks(self.ax.get_yticks())
self.ax.set_yticklabels([str(Time(x)) for x in self.ax.get_yticks()])
self.canvas.draw()
def update_plot(self):
self.plot()
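# Hedged usage sketch (Time and the expected data shapes are project-local;
# the values below are illustrative):
#
#   from PyQt5.QtWidgets import QApplication
#   app = QApplication([])
#   runs = [3600, 3550, 3495]        # run times in seconds, PB first
#   widget = Save_plot_app((runs, Time(3495), 3300))  # (times, pb, WR time)
#   widget.show()
#   app.exec_()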
|
GB127/SRC-statistics
|
plots/save_plot.py
|
save_plot.py
|
py
| 2,948 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QListWidget",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "code_SRC.composantes.Time",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "code_SRC.composantes.Time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QListWidgetItem",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QFont",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QListWidget",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.Figure",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.rc",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGridLayout",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "code_SRC.composantes.Time",
"line_number": 75,
"usage_type": "call"
}
] |
22432578680
|
import pandas as pd
from openpyxl.styles import PatternFill
def rgb_to_hex(rgb):
return '%02x%02x%02x' % rgb
def darken(hex_code, shade):
    shade = shade/10
    # note: shade is inverted here - shade=9 scales channels by 0.9, i.e. 10% darker
RGB = tuple(int(hex_code[i:i + 2], 16) for i in (0, 2, 4))
r = RGB[0]
g = RGB[1]
b = RGB[2]
    # scale each channel by the shade fraction
darken_R = int(round(r * shade))
darken_G = int(round(g * shade))
darken_B = int(round(b * shade))
rgbTuple = (darken_R, darken_G, darken_B) #tuple of RGB values to convert to Hex
return rgb_to_hex(rgbTuple)
def lighten(hex_code, shade):
shade = shade/10
RGB = tuple(int(hex_code[i:i + 2], 16) for i in (0, 2, 4))
r = RGB[0]
g = RGB[1]
b = RGB[2]
    # move each channel toward 255 by the shade fraction
lighten_R = int(round(r + ((255 - r) * shade), 0))
lighten_G = int(round(g + ((255 - g) * shade), 0))
lighten_B = int(round(b + ((255 - b) * shade), 0))
rgbTuple = (lighten_R, lighten_G, lighten_B) #tuple of RGB values to convert to Hex
return rgb_to_hex(rgbTuple)
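# Worked example: darken('cc0000', 9) keeps 90% of each channel:
#   r = round(204 * 0.9) = 184 (0xb8), g = b = 0  ->  returns 'b80000'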
def create_lighten_df(colorList):
df = pd.DataFrame(columns=['Original Color Hex', 'Change %', 'Color Hex', 'Direction', 'Color'])
for item in colorList:
for num in range(11):
original_color = "#" + item
change_percent = str(num) + '0%'
color_hex = "#" + lighten(hex_code=item, shade=num)
direction = 'Lighter'
df = pd.concat([df,
pd.DataFrame.from_dict({'Original Color Hex': [original_color],
'Change %': [change_percent],
'Color Hex': [color_hex],
'Direction': [direction],
'Color' : ""})],
ignore_index=True)
return df
def create_darken_df(colorList):
df = pd.DataFrame(columns=['Original Color Hex', 'Change %', 'Color Hex', 'Direction', 'Color'])
for item in colorList:
for num in range(11):
original_color = "#" + item
change_percent = str(num) + '0%'
color_hex = "#" + darken(hex_code=item, shade=num)
direction = 'Darken'
df = pd.concat([df,
pd.DataFrame.from_dict({'Original Color Hex': [original_color],
'Change %': [change_percent],
'Color Hex': [color_hex],
'Direction': [direction],
'Color' : ""})],
ignore_index=True)
percDict = {
'100%': "00%",
'90%': "10%",
'80%': "20%",
'70%': "30%",
'60%': "40%",
'50%': "50%",
'40%': "60%",
'30%': "70%",
'20%': "80%",
'10%': "90%",
'00%': "100%"
}
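    # the loop above labels shade=10 (the unchanged color) as '100%', so flip
    # the labels: after the remap '00%' means unchanged and '100%' fully darkened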
df['Change %'] = df['Change %'].apply(lambda x: percDict[x])
df['Original Color Hex'] = df['Original Color Hex'].astype('category')
df = df.sort_index(ascending=False).reset_index(drop=True)
df['Sorting'] = df.index
sorter = ['#' + str(x) for x in colorList]
df['Original Color Hex'] = df['Original Color Hex'].cat.set_categories(sorter)
df = df.sort_values(by=['Original Color Hex', 'Sorting']).drop(columns='Sorting')
return df
###############start of work###################
# colorList is the list of hex codes that will be used to generate the report
colorList = ['CC0000', #red
'000000', #black
'4d4d4f', #dark gray
'969697', #medium gray
'dddddd', # light gray
'f3f3f3', # ultra light gray
'f58025', # ada compliant orange
'fdb913', # ada compliant yellow
'97ca3e', # ada compliant green
'479cd6', # ada compliant blue
'1d3c6d', # ada compliant navy
'751c59' # ada compliant purple
]
lighten_df = create_lighten_df(colorList) #create a dataframe of lightened colors
darken_df = create_darken_df(colorList) # create a dataframe of darkened colors
with pd.ExcelWriter("ColorShadeReferenceWorkbook.xlsx", engine="openpyxl") as writer:
# create the "Lighten" worksheet
sheet_name = "Lighten"
# Export DataFrame content
lighten_df.to_excel(writer, sheet_name=sheet_name, index=False)
# Set column width dimensions
sheet = writer.sheets[sheet_name]# open sheet
sheet.column_dimensions['A'].width = 18
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
sheet.column_dimensions['D'].width = 10
sheet.column_dimensions['E'].width = 9
# Set background colors depending on cell values
    for cell, in sheet[f'E2:E{len(lighten_df) + 1}']:  # skip the header row; one worksheet row per DataFrame row
        value = lighten_df["Color Hex"].iloc[cell.row - 2]  # color comes from that row's 'Color Hex' value
value = value.strip("#")
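        # openpyxl's PatternFill expects a bare RRGGBB (or AARRGGBB) hex string, so the '#' is stripped first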
cell.fill = PatternFill(start_color=value, end_color=value, fill_type='solid')
#create the "Darken" worksheet
sheet_name = "Darken"
# Export DataFrame content
darken_df.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = writer.sheets[sheet_name]
# Set column width dimensions
sheet.column_dimensions['A'].width = 18
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
sheet.column_dimensions['D'].width = 10
sheet.column_dimensions['E'].width = 9
# Set background colors depending on cell values
    for cell, in sheet[f'E2:E{len(darken_df) + 1}']:  # skip the header row; one worksheet row per DataFrame row
        value = darken_df["Color Hex"].iloc[cell.row - 2]  # color comes from that row's 'Color Hex' value
        value = value.strip("#")  # strip the leading '#'
        cell.fill = PatternFill(start_color=value, end_color=value, fill_type='solid')  # fill the cell with a solid color
|
mbgoodin/ColorShadeGenerator
|
main.py
|
main.py
|
py
| 6,351 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "pandas.ExcelWriter",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 161,
"usage_type": "call"
}
] |
31983482821
|
"""empty message
Revision ID: cadce5b06ea7
Revises: 5c4a7cc7d0e2
Create Date: 2020-03-25 11:23:09.141719
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'cadce5b06ea7'
down_revision = '5c4a7cc7d0e2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_user_email', table_name='user')
op.drop_index('ix_user_username', table_name='user')
op.drop_table('user')
op.drop_table('followers')
op.drop_index('ix_post_timestamp', table_name='post')
op.drop_table('post')
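    # the statements above drop the old blog tables (user, post, followers) and
    # their indexes, presumably because they were removed from the models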
op.add_column('label_data', sa.Column('is_labeled', sa.Integer(), nullable=True))
op.create_index(op.f('ix_label_data_is_labeled'), 'label_data', ['is_labeled'], unique=False)
op.drop_index('ix_label_data_is_labled', table_name='label_data')
op.drop_column('label_data', 'is_labled')
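    # net effect of the four statements above: replace the misspelled is_labled
    # column (and its index) with is_labeled; note this is an add/drop, so
    # existing column values are not copied over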
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('label_data', sa.Column('is_labled', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
op.create_index('ix_label_data_is_labled', 'label_data', ['is_labled'], unique=False)
op.drop_index(op.f('ix_label_data_is_labeled'), table_name='label_data')
op.drop_column('label_data', 'is_labeled')
op.create_table('post',
sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
sa.Column('body', mysql.VARCHAR(length=140), nullable=True),
sa.Column('timestamp', mysql.DATETIME(), nullable=True),
sa.Column('user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('language', mysql.VARCHAR(length=5), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='post_ibfk_1'),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_index('ix_post_timestamp', 'post', ['timestamp'], unique=False)
op.create_table('followers',
sa.Column('follower_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('followed_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['user.id'], name='followers_ibfk_1'),
sa.ForeignKeyConstraint(['follower_id'], ['user.id'], name='followers_ibfk_2'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_table('user',
sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
sa.Column('username', mysql.VARCHAR(length=64), nullable=True),
sa.Column('email', mysql.VARCHAR(length=120), nullable=True),
sa.Column('password_hash', mysql.VARCHAR(length=128), nullable=True),
sa.Column('about_me', mysql.VARCHAR(length=140), nullable=True),
sa.Column('last_seen', mysql.DATETIME(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.create_index('ix_user_username', 'user', ['username'], unique=True)
op.create_index('ix_user_email', 'user', ['email'], unique=True)
# ### end Alembic commands ###
|
huawenjin1995/Microblog
|
migrations/versions/cadce5b06ea7_.py
|
cadce5b06ea7_.py
|
py
| 3,298 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "alembic.op.drop_index",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "alembic.op.add_column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_index",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "alembic.op.add_column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.INTEGER",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_index",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_table",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.INTEGER",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.VARCHAR",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.DATETIME",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.INTEGER",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.VARCHAR",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_index",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_table",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.INTEGER",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.INTEGER",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_table",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.INTEGER",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.VARCHAR",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.VARCHAR",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.VARCHAR",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.VARCHAR",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.DATETIME",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_index",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_index",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 72,
"usage_type": "name"
}
] |
18135169137
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
page = '<h1>Powers of two </h1>\n'
for i in range(32):
line = '2^{} = {}<br/>\n'.format(i, 2**i)
page += line
return page
if __name__ == '__main__':
app.run(debug=True)
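# Running this module serves http://127.0.0.1:5000/ on Flask's debug dev server;
# the index view renders one line per power, 2^0 through 2^31.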
|
janirac/Interapt-Labs
|
Python/Flask/pyflask/ANSWERS/flask_powers_of_two.py
|
flask_powers_of_two.py
|
py
| 297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
}
] |
19246991804
|
from .model import ImageEncoder, ImageDecoder128
import torch
class AE(torch.nn.Module):
def __init__(self):
super().__init__()
self.encoder = ImageEncoder([1, 128, 256, 512, 1024, 2048, 4096], kernel_size=3, stride=2)
self.decoder = ImageDecoder128()
self.tanh = torch.nn.Tanh()
# self.gelu = torch.nn.GELU()
self.sigmoid = torch.nn.Sigmoid()
self.range = 1024.0
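        # 1024.0 is the assumed dynamic range of the raw inputs: forward() scales
        # inputs down to [0, 1] on the way in and back up on the way out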
def forward(self, image):
x = image / self.range
x = self.encoder(x)
        # an extra activation could go here, though it is probably a bad idea
        x = self.tanh(x)  # maybe not so bad: tanh constrains the latent space to [-1, 1]
x = self.decoder(x)
x = self.sigmoid(x) * self.range
return x
|
pmistry9597/Climate-Hack-2022
|
attention_conv/ae.py
|
ae.py
|
py
| 763 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "model.ImageEncoder",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "model.ImageDecoder128",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "attribute"
}
] |
31411828229
|
import requests
import unittest
import json
from Global_base import global_base,globa_phone
from parameterized import parameterized
from Global_base import login
class GetMainLinkBySublink(unittest.TestCase):
"""我的建议接口"""
def setUp(self):
self.url = global_base.DefTool.url(self, '/usercenter/user/suggest')
def tearDown(self):
print("请求地址为{}".format(self.url))
print("请求参数为{}".format(json.dumps(self.params, indent=2, sort_keys=False, ensure_ascii=False)))
print("请求结果为:{}".format(json.dumps(self.result, indent=2, sort_keys=False, ensure_ascii=False)))
@parameterized.expand([
(
"建议提交", "A11", "", "867910035562539", "2.6.0", "15", "1003",
"1", "sinaif", "ef70fb3178dccde19df9295a68aca0a3", "qsj")
])
# @unittest.skip("pass")
def test_getMainLinkBySublink(self, name, suggestcontent, contactway, deviceId, ver, verno,
productId, deviceType, channelId, deviceToken, mjbname):
"""我的建议接口"""
values = login.LoginByPassWord().login_by_password(int(globa_phone.phone()))
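        # log in with a generated phone number; values[1] is the auth token used below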
token = values[1]
header = {"token": token}
pa = {"suggestcontent": suggestcontent, "contactway": contactway, "deviceId": deviceId, "ver": ver,
"verno": verno, "productId": productId, "channelId": channelId, "deviceToken": deviceToken,
"mjbname": mjbname, "deviceType": deviceType}
self.params = global_base.DefTool().payload(**pa)
self.result = requests.post(url=self.url, headers=header, data=self.params).json()
self.assertEqual(self.result["msg"], "ok")
self.assertEqual(self.result['code'], 200)
if __name__ == '__main__':
unittest.main()
|
chenyongzhiaaron/pytest-test-interface
|
interface/app/suggest_test.py
|
suggest_test.py
|
py
| 1,803 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "Global_base.global_base.DefTool.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "Global_base.global_base.DefTool",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "Global_base.global_base",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Global_base.login.LoginByPassWord",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Global_base.login",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Global_base.globa_phone.phone",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Global_base.globa_phone",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Global_base.global_base.DefTool",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Global_base.global_base",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 41,
"usage_type": "call"
}
] |
26038862506
|
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.codegen.soap.soap_subsystem import SoapSubsystem
from pants.backend.codegen.soap.target_types import WsdlSourcesGeneratorTarget
from pants.core.goals.tailor import (
AllOwnedSources,
PutativeTarget,
PutativeTargets,
PutativeTargetsRequest,
)
from pants.engine.fs import PathGlobs, Paths
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.dirutil import group_by_dir
from pants.util.logging import LogLevel
@dataclass(frozen=True)
class PutativeWsdlTargetsRequest(PutativeTargetsRequest):
pass
@rule(level=LogLevel.DEBUG, desc="Determine candidate WSDL targets to create")
async def find_putative_targets(
req: PutativeWsdlTargetsRequest,
all_owned_sources: AllOwnedSources,
soap_subsystem: SoapSubsystem,
) -> PutativeTargets:
if not soap_subsystem.tailor:
return PutativeTargets()
all_wsdl_files = await Get(Paths, PathGlobs, req.path_globs("*.wsdl"))
unowned_wsdl_files = set(all_wsdl_files.files) - set(all_owned_sources)
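    # propose one putative WSDL target per directory that still contains unowned .wsdl files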
pts = [
PutativeTarget.for_target_type(
WsdlSourcesGeneratorTarget,
path=dirname,
name=None,
triggering_sources=sorted(filenames),
)
for dirname, filenames in group_by_dir(unowned_wsdl_files).items()
]
return PutativeTargets(pts)
def rules():
return [
*collect_rules(),
UnionRule(PutativeTargetsRequest, PutativeWsdlTargetsRequest),
]
|
pantsbuild/pants
|
src/python/pants/backend/codegen/soap/tailor.py
|
tailor.py
|
py
| 1,624 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "pants.core.goals.tailor.PutativeTargetsRequest",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.tailor.AllOwnedSources",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pants.backend.codegen.soap.soap_subsystem.SoapSubsystem",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.tailor.PutativeTargets",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pants.engine.internals.selectors.Get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Paths",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.PathGlobs",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "pants.core.goals.tailor.PutativeTarget.for_target_type",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pants.backend.codegen.soap.target_types.WsdlSourcesGeneratorTarget",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "pants.core.goals.tailor.PutativeTarget",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pants.util.dirutil.group_by_dir",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.tailor.PutativeTargets",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pants.core.goals.tailor.PutativeTargets",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.collect_rules",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pants.engine.unions.UnionRule",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pants.core.goals.tailor.PutativeTargetsRequest",
"line_number": 52,
"usage_type": "argument"
}
] |
9023100800
|
import math
import time
from selenium import webdriver
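# NOTE: the find_element_by_* helpers below are the Selenium 3 API; Selenium 4
# replaces them with find_element(By.CSS_SELECTOR, ...) and friends.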
def calc(x):
return str(math.log(abs(12 * math.sin(int(x)))))
link = 'http://suninjuly.github.io/redirect_accept.html'
try:
browser = webdriver.Chrome()
browser.get(link)
time.sleep(4)
# browser.execute_script("document.getElementsByTagName('button')[0].classList.remove('trollface');")
browser.find_element_by_css_selector('[type="submit"]').click()
lst_win = browser.window_handles
current_win = lst_win[1]
browser.switch_to.window(current_win)
print(browser.current_url)
x = browser.find_element_by_id('input_value')
print(x.text)
browser.find_element_by_id('answer').send_keys(calc(x.text))
browser.find_element_by_css_selector("[type='submit']").click()
print(browser.switch_to.alert.text)
time.sleep(2)
browser.switch_to.alert.accept()
finally:
browser.quit()
|
Bulgakoff/auto-tests-course
|
less_2/windows/less2_step3_wins_1.py
|
less2_step3_wins_1.py
|
py
| 920 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.log",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
}
] |
17141988862
|
import time
import math
import torch
from torch import nn, Tensor
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from transformers import logging
from utils.utils import AverageMeter
from .adaptformer import AdaptFormer
from torch.nn.parallel import DistributedDataParallel as DDP
class AdaptFormerm(nn.Module):
def __init__(self, num_frames, num_classes, global_pool=True):
super().__init__()
self.backbone = AdaptFormer(all_frames=num_frames, vpt_on=True, name_ckpt='pretrain_vit_base_1600.pth')
self.layer_norm = nn.LayerNorm(self.backbone.num_features)
self.classif = nn.Linear(in_features=self.backbone.num_features, out_features=num_classes)
self.global_pool = global_pool
def forward(self, images):
x = self.backbone(images)
if self.global_pool:
x = x.mean(1)
else:
x = x[:, 0]
x = self.layer_norm(x)
out = self.classif(x)
return out
class AdaptFormermExecutor:
def __init__(self, train_loader, test_loader, criterion, eval_metric, class_list, test_every, distributed, gpu_id) -> None:
super().__init__()
self.train_loader = train_loader
self.test_loader = test_loader
self.criterion = criterion.to(gpu_id)
self.eval_metric = eval_metric.to(gpu_id)
self.class_list = class_list
self.test_every = test_every
self.distributed = distributed
self.gpu_id = gpu_id
num_frames = self.train_loader.dataset[0][0].shape[0]
num_classes = len(class_list)
logging.set_verbosity_error()
model = AdaptFormerm(num_frames, num_classes).to(gpu_id)
if distributed:
self.model = DDP(model, device_ids=[gpu_id])
else:
self.model = model
# train everything other than self.model.backbone
for name, p in self.model.named_parameters():
if not name.startswith('backbone'):
p.requires_grad = True
# within the backbone only train the missing keys in self.model.backbone
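        # NOTE: under DDP the wrapped module lives at self.model.module, so
        # self.model.backbone as written only resolves in the non-distributed path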
for name, p in self.model.backbone.named_parameters():
if name in self.model.backbone.msg.missing_keys:
p.requires_grad = True
else:
p.requires_grad = False
self.optimizer = self._construct_optimizer()
self.scheduler = CosineAnnealingWarmRestarts(self.optimizer, T_0=10, eta_min=1e-8)
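        # cosine-annealing LR schedule that warm-restarts every 10 scheduler steps
        # (one step per epoch here) and decays toward eta_min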
def _construct_optimizer(self):
optim_params = []
for p in self.model.parameters():
if p.requires_grad:
optim_params.append(p)
return Adam(optim_params, lr=0.01, weight_decay=0.)
def _train_batch(self, data, label):
self.optimizer.zero_grad()
output = self.model(data)
loss_this = self.criterion(output, label)
loss_this.backward()
self.optimizer.step()
return loss_this.item()
def _train_epoch(self, epoch):
self.model.train()
loss_meter = AverageMeter()
start_time = time.time()
for data, label in self.train_loader:
data, label = data.to(self.gpu_id, non_blocking=True), label.to(self.gpu_id, non_blocking=True)
loss_this = self._train_batch(data, label)
loss_meter.update(loss_this, data.shape[0])
elapsed_time = time.time() - start_time
if (self.distributed and self.gpu_id == 0) or not self.distributed:
print("Epoch [" + str(epoch + 1) + "]"
+ "[" + str(time.strftime("%H:%M:%S", time.gmtime(elapsed_time))) + "]"
+ " loss: " + "{:.4f}".format(loss_meter.avg), flush=True)
def train(self, start_epoch, end_epoch):
for epoch in range(start_epoch, end_epoch):
self._train_epoch(epoch)
self.scheduler.step()
if (epoch + 1) % self.test_every == 0:
eval = self.test()
if (self.distributed and self.gpu_id == 0) or not self.distributed:
print("[INFO] Evaluation Metric: {:.2f}".format(eval * 100), flush=True)
def test(self):
self.model.eval()
eval_meter = AverageMeter()
for data, label in self.test_loader:
data, label = data.to(self.gpu_id), label.long().to(self.gpu_id)
with torch.no_grad():
output = self.model(data)
eval_this = self.eval_metric(output, label)
eval_meter.update(eval_this.item(), data.shape[0])
return eval_meter.avg
def save(self, file_path="./checkpoint.pth"):
backbone_state_dict = self.model.backbone.state_dict()
optimizer_state_dict = self.optimizer.state_dict()
torch.save({"backbone": backbone_state_dict,
"optimizer": optimizer_state_dict},
file_path)
def load(self, file_path):
checkpoint = torch.load(file_path)
self.model.backbone.load_state_dict(checkpoint["backbone"])
self.optimizer.load_state_dict(checkpoint["optimizer"])
class PositionalEncoding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 2000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
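        # standard sinusoidal encoding: pe[pos, 2i] = sin(pos / 10000^(2i/d_model)),
        # pe[pos, 2i+1] = cos(pos / 10000^(2i/d_model))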
pe = torch.zeros(1, max_len, d_model)
pe[0, :, 0::2] = torch.sin(position * div_term)
pe[0, :, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe, persistent=False)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x: Tensor, shape [batch_size, seq_len, embedding_dim]
"""
x = x + self.pe[:, :x.size(1)]
out = self.dropout(x)
return out
class GroupWiseLinear(nn.Module):
# could be changed to:
# output = torch.einsum('ijk,zjk->ij', x, self.W)
# or output = torch.einsum('ijk,jk->ij', x, self.W[0])
def __init__(self, num_class, hidden_dim, bias=True):
super().__init__()
self.num_class = num_class
self.hidden_dim = hidden_dim
self.bias = bias
self.W = nn.Parameter(torch.Tensor(1, num_class, hidden_dim))
if bias:
self.b = nn.Parameter(torch.Tensor(1, num_class))
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.W.size(2))
for i in range(self.num_class):
self.W[0][i].data.uniform_(-stdv, stdv)
if self.bias:
for i in range(self.num_class):
self.b[0][i].data.uniform_(-stdv, stdv)
def forward(self, x):
# x: B,K,d
x = (self.W * x).sum(-1)
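        # elementwise product with W (1, K, d) summed over d: a per-class dot
        # product giving logits of shape (B, K)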
if self.bias:
x = x + self.b
return x
|
mondalanindya/MSQNet
|
multi-label-action-main/models/adaptformerm.py
|
adaptformerm.py
|
py
| 7,084 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "adaptformer.AdaptFormer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "transformers.logging.set_verbosity_error",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "transformers.logging",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.parallel.DistributedDataParallel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "utils.utils.AverageMeter",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "utils.utils.AverageMeter",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "torch.arange",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "torch.cos",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 167,
"usage_type": "call"
}
] |
75154654266
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'django'
]
setup(name='djinn',
version="1.0.0-snapshot",
description='PythonUnited Intranet',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: Freely Distributable",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Site Management",
"Topic :: Software Development :: Libraries :: Application Frameworks"
],
author='PythonUnited',
author_email='[email protected]',
license='beer-ware',
url='https://github.com/PythonUnited/djinn',
keywords='Djinn Core',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires = requires,
tests_require= requires,
test_suite="djinn",
entry_points = """\
"""
)
|
PythonUnited/djinn
|
setup.py
|
setup.py
|
py
| 1,172 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 31,
"usage_type": "call"
}
] |
73786272509
|
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from math import sqrt
from utils import mape
def baseline(df):
# predict next year by just taking the values from last year
predicted = df.loc['2016-01-01':'2016-12-31']
actual = df.loc['2017-01-01':'2017-12-31']
# and padding the missing values with the last datapoint
actual.loc[actual < 1] = np.nan
actual = actual.fillna(method='pad')
# transform df to series
predicted = pd.Series(predicted)
actual = pd.Series(actual)
print("Number of actual days:", len(actual))
print('Number of predicted days:', len(predicted))
rmse = sqrt(mean_squared_error(actual, predicted))
print('For 12 Months RMSE: %.3f' % rmse)
mape_value = mape(actual, predicted)
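    # mape is the project's own utils helper; MAPE is conventionally
    # mean(|actual - predicted| / |actual|) * 100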
print ("For 12 Months MAPE :", mape_value)
def calculate_baseline(path):
df = pd.read_pickle(path)
baseline(df)
# Call Baseline Method
calculate_baseline("data/preprocessed/branch1.pkl")
|
coomish/stl-transferability
|
Baseline.py
|
Baseline.py
|
py
| 995 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.nan",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.mape",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 27,
"usage_type": "call"
}
] |
39868260221
|
"""
Admin site configuration
"""
import logging
from typing import Dict
import toml
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.apps import AdminConfig
from django.http import HttpRequest
from django.template.response import TemplateResponse
from django.urls import path
class CustomAdmin(admin.AdminSite):
site_header = f"{settings.SLOGAN}"
index_title = f"{settings.INDEX_TITLE}"
def __init__(self, *args, **kwargs):
self.version_file = "version.toml"
self.changelog_file = "CHANGELOG.md"
super().__init__(*args, **kwargs)
def get_versions(self) -> Dict:
"""Получение информации о версии релиза"""
try:
return toml.load(self.version_file)
except (toml.TomlDecodeError, FileNotFoundError) as error:
            logging.error(
                "Error loading file %s: %s", self.version_file, error
            )
return {}
def each_context(self, request):
site_data = super().each_context(request)
release_data = self.get_versions()
return {
**site_data,
"version": release_data.get("version"),
"release_url": release_data.get("release_url"),
"release_date": release_data.get("release_date"),
}
def get_urls(self):
urls = super().get_urls()
custom_urls = [
path("release/", self.admin_view(self.changelog), name="release")
]
return custom_urls + urls
def changelog(self, request: HttpRequest) -> TemplateResponse:
try:
with open(self.changelog_file, "r", encoding="utf-8") as file:
changelog = file.read()
except (FileNotFoundError, UnicodeEncodeError) as error:
            logging.error(
                "Error loading file %s: %s", self.changelog_file, error
            )
            changelog = "Error loading the changelog"
context = dict(self.each_context(request), changelog=changelog)
return TemplateResponse(request, "admin/changelog.html", context)
class MainAdminConfig(AdminConfig):
"""Переопределение класса админки"""
default_site = "main.admin_config.CustomAdmin"
|
artemgv/spacemusic
|
app/main/admin_config.py
|
admin_config.py
|
py
| 2,473 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.AdminSite",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.SLOGAN",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.INDEX_TITLE",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "toml.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "toml.TomlDecodeError",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.http.HttpRequest",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.template.response.TemplateResponse",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.template.response.TemplateResponse",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.apps.AdminConfig",
"line_number": 68,
"usage_type": "name"
}
] |
16621629920
|
"""
refresh_tokens.py
"""
from __future__ import print_function
import requests
import time
from .exceptions import AgaveTokenError
from ..utils import handle_bad_response_status_code
def refresh_token(api_key, api_secret, refresh_token, tenant_url):
""" Retrieve a new Oauth bearer token
PARAMETERS
----------
api_key: str
api_secret: str
refresh_token: str
tenant_url : str
RETURNS
-------
token_data: dict
access_token: str
refresh_token: str
expires_in: str
created_at: str
expires_at: str
"""
# Set request endpoint.
endpoint = "{0}{1}".format(tenant_url, "/token")
# Make request.
try:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
data = {
"grant_type": "refresh_token",
"refresh_token": refresh_token,
"scope": "PRODUCTION"
}
resp = requests.post(endpoint, headers=headers, data=data,
auth=(api_key, api_secret))
except requests.exceptions.MissingSchema as err:
raise AgaveTokenError(err)
# Handle bad status code.
handle_bad_response_status_code(resp)
# Return pertinent value.
response = resp.json()
now = int(time.time())
expires_at = now + int(response["expires_in"])
token_data = {
"access_token": response["access_token"],
"refresh_token": response["refresh_token"],
"expires_in": response["expires_in"],
"created_at": str(now),
"expires_at": time.strftime("%a %b %-d %H:%M:%S %Z %Y", time.localtime(expires_at))
}
return token_data
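# Minimal usage sketch (all values below are hypothetical placeholders):
#   token_data = refresh_token("my-key", "my-secret", "my-refresh-token",
#                              "https://tenant.example.com")
#   print(token_data["expires_at"])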
|
tapis-project/tapispy
|
tapispy/tokens/refresh_tokens.py
|
refresh_tokens.py
|
py
| 1,724 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "exceptions.AgaveTokenError",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "utils.handle_bad_response_status_code",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 66,
"usage_type": "call"
}
] |
71718720189
|
import sys
import json
asmFilename = sys.argv[1]
with open(asmFilename, 'r') as file:
lines = file.readlines()
with open("pythonDict.py", 'r') as pyFile:
    addrDict = json.loads(pyFile.readline())
if addrDict:
print("\nChanging labels...")
for index, line in enumerate(lines):
words = line.split()
if words:
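            # words[0][:-1] drops the last character, presumably the trailing ':' of a label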
if words[0][:-1] in addrDict:
print(lines[index][:-1] + ' -> ', end='')
line = line.replace(words[2], addrDict[words[0][:-1]])
lines[index] = line
print(lines[index][:-1])
with open(asmFilename, 'w') as file:
file.writelines(lines)
print("Labels changed.\n")
else: print("No labels to change\n")
|
mousssse/SystemeInformatique
|
branching.py
|
branching.py
|
py
| 746 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
}
] |