| column | dtype |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 5–283) |
| content_id | string (length 40) |
| detected_licenses | sequence (0–41 items) |
| license_type | string (2 classes) |
| repo_name | string (length 7–96) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (58 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (12.7k–662M, nullable) |
| star_events_count | int64 (0–35.5k) |
| fork_events_count | int64 (0–20.6k) |
| gha_license_id | string (11 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (43 classes) |
| src_encoding | string (9 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (7–5.88M) |
| extension | string (30 classes) |
| content | string (length 7–5.88M) |
| authors | sequence (1 item) |
| author | string (length 0–73) |
2b842410ff09b5c4c5ee02673863363ca749c5e3 | e6af4bb164fe4361a3f016bde78087997851a96c | /AI/src/tools/build_negative.py | b2406b4468f5d0c95576ccf5d435c861b752ef34 | [] | no_license | gagerblog/Oscar-smart-trash | c158582d755fb6dcb8bc5a21a41989a5756e8f23 | b4cd78cdc56deb6482965016026e8e0a94e2e2d8 | refs/heads/master | 2020-08-24T00:09:30.231705 | 2018-07-17T08:00:46 | 2018-07-17T08:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,291 | py | """Build TFRecords from ImageNet dataset."""
import os
import csv
import random
import logging
import xml.etree.ElementTree as ET
import tensorflow as tf
from logging_utils import setup_logging
from utils import process_dataset
FLAGS = tf.flags.FLAGS
def define_flags():
"""Define flags to config building process."""
# Output data Flags
tf.flags.DEFINE_string(
"tfrecord_output_dir",
"/media/ubuntu/Data/dataset/recycle/data/interim/test",
"Output directory for TFRecord files."
)
# Input data Flags
tf.flags.DEFINE_string("image_list",
"resources/negative_images.csv",
"File contains list of images to build dataset from.")
tf.flags.DEFINE_string("image_dir",
"data/ILSVRC2012_val/",
"Directory of ImageNet's images.")
tf.flags.DEFINE_string("label_dir",
"data/val/",
"Directory of ImageNet's labels.")
# Build config Flags
tf.flags.DEFINE_float(
"min_area",
0.4,
"Minimum area of the object to consider valid."
)
tf.flags.DEFINE_string(
"subset_to_build",
"all",
"The ImageNet dataset to build (train/val/test/all)."
)
tf.flags.DEFINE_float(
"train_percentage",
0.8,
"Percentage of images to put in training set."
)
tf.flags.DEFINE_integer(
"train_shards",
4,
"Number of shards for negative training set."
)
tf.flags.DEFINE_float(
"val_percentage",
0.1,
"Percentage of images to put in validation set."
)
tf.flags.DEFINE_integer(
"val_shards",
2,
"Number of shards for negative validation set."
)
tf.flags.DEFINE_integer(
"test_shards",
2,
"Number of shards for negative test set."
)
tf.flags.DEFINE_integer(
"num_threads",
4,
"Number of threads to write images in TFRecord files."
)
def convert_to_infos(images):
"""Convert to image infos format with label -1."""
image_infos = []
for filepath in images:
label = -1
image_infos.append({"filepath": filepath, "label": label})
return image_infos
def parse_bbox_xml(file_name):
"""Get image info and list of bounding boxes from a xml file"""
logger = logging.getLogger()
try:
tree = ET.parse(file_name)
except Exception as e:
msg = "Failed to parse {}: {}".format(file_name, e)
logger.warning(msg)
return None, None
root = tree.getroot()
image_size = root.find("size")
image_width = int(image_size.find("width").text)
image_height = int(image_size.find("height").text)
image_info = {
"width": image_width,
"height": image_height
}
# Loop through each bbox, get coordinates and object WordNet ID
bbox = []
for obj in root.iter("object"):
name = obj.find("name").text
bndbox = obj.find("bndbox")
xmin = int(bndbox.find("xmin").text)
ymin = int(bndbox.find("ymin").text)
xmax = int(bndbox.find("xmax").text)
ymax = int(bndbox.find("ymax").text)
bbox.append({"name": name,
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax})
return image_info, bbox
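# A sketch of the PASCAL-VOC style bounding-box XML that parse_bbox_xml() reads.
# The tag names come from the parsing code above; the literal values are made up:
#
#   <annotation>
#     <size><width>500</width><height>375</height></size>
#     <object>
#       <name>n01751748</name>
#       <bndbox><xmin>12</xmin><ymin>34</ymin><xmax>210</xmax><ymax>300</ymax></bndbox>
#     </object>
#   </annotation>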
def is_valid(label_name, wnid, label_dir, min_area):
"""Check if an image is valid by checking the size of the largest object.
A valid image contains at least one object of interest with area larger than
a threshold.
Args:
    label_name: str, name of the file that contains the description of the image.
    wnid: str, the WordNet ID of interest.
    label_dir: str, directory of the label files.
    min_area: float, minimum fraction of the image area an object must cover to be valid.
Returns:
valid: bool, whether the image is considered as valid.
"""
image_info, bbox = parse_bbox_xml(
os.path.join(label_dir, label_name))
if not image_info:
return False
image_size = image_info["width"] * image_info["height"] * 1.0
max_size = 0.0
for b in bbox:
if b["name"].strip().lower() != wnid:
continue
obj_size = abs((b["xmax"] - b["xmin"]) * (b["ymax"] - b["ymin"]) * 1.0)
if obj_size > max_size:
max_size = obj_size
return max_size / image_size > min_area
def split_images(images, train_percentage, val_percentage):
"""Split the images dataset into train/val/test set."""
image_train, image_val, image_test = [], [], []
for iid in images:
num_images = len(images[iid])
num_trains = int(train_percentage * num_images)
num_vals = int(val_percentage * num_images)
        image_train.extend(images[iid][:num_trains])
        image_val.extend(images[iid][num_trains: num_trains + num_vals])
        image_test.extend(images[iid][num_trains + num_vals:])
return image_train, image_val, image_test
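# Example: with the default flags (train_percentage=0.8, val_percentage=0.1),
# a class with 10 valid images contributes 8 train, 1 validation and 1 test image.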
def get_valid_images(image_list, image_dir, label_dir, min_area):
"""Get all valid images and sort by classes to build balance datasets.
Args:
    image_list: str, path to the CSV file that lists the images.
    image_dir: str, path to the folder that contains the images.
    label_dir: str, directory of the label files.
    min_area: float, minimum fraction of the image area an object must cover to be valid.
Returns:
    valid_images: dict mapping each class id (iid) to the list of paths of its
      valid images.
"""
valid_images = {}
with open(image_list, "rt") as f:
csv_reader = csv.reader(f)
next(csv_reader) # Skip header
for i, row in enumerate(csv_reader):
image_name, label_name, wnid, iid = row
if not is_valid(label_name, wnid, label_dir, min_area):
continue
image_path = os.path.join(image_dir, image_name)
if iid in valid_images:
valid_images[iid].append(image_path)
else:
valid_images[iid] = [image_path]
return valid_images
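# A minimal sketch of the CSV that get_valid_images() expects (inferred from the
# row unpacking above; the header and the sample values are only illustrative):
#
#   image_name,label_name,wnid,iid
#   ILSVRC2012_val_00000001.JPEG,ILSVRC2012_val_00000001.xml,n01751748,0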
def get_image_infos(image_list, image_dir, train_percentage, val_percentage,
label_dir, min_area):
"""Get lists of train/val/test image infos."""
valid_images = get_valid_images(image_list, image_dir, label_dir, min_area)
image_train, image_val, image_test = split_images(valid_images,
train_percentage,
val_percentage)
image_train = convert_to_infos(image_train)
image_val = convert_to_infos(image_val)
image_test = convert_to_infos(image_test)
return image_train, image_val, image_test
def main(unused_argv):
"""Build TFRecords."""
random.seed(333)
setup_logging(filename="tfrecord_negative.log")
logger = logging.getLogger()
logger.info("Start building negative dataset")
if not tf.gfile.IsDirectory(FLAGS.tfrecord_output_dir):
tf.gfile.MakeDirs(FLAGS.tfrecord_output_dir)
subset = FLAGS.subset_to_build.lower().strip()
assert subset == "all" or subset == "train" or subset == "val" or \
subset == "test"
image_train, image_val, image_test = get_image_infos(
FLAGS.image_list, FLAGS.image_dir,
FLAGS.train_percentage, FLAGS.val_percentage,
FLAGS.label_dir, FLAGS.min_area)
if subset == "train" or subset == "all":
process_dataset("negative-train", FLAGS.train_shards, image_train,
1, FLAGS.num_threads, FLAGS.tfrecord_output_dir)
if subset == "val" or subset == "all":
process_dataset("negative-val", FLAGS.val_shards, image_val,
1, FLAGS.num_threads, FLAGS.tfrecord_output_dir)
if subset == "test" or subset == "all":
process_dataset("negative-test", FLAGS.test_shards, image_test,
1, FLAGS.num_threads, FLAGS.tfrecord_output_dir)
logger.info("Finish building negative dataset")
if __name__ == '__main__':
define_flags()
tf.app.run()
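# Example invocation (a sketch; every path below is a placeholder and the flag
# names are the ones defined in define_flags() above):
#
#   python build_negative.py \
#       --image_list resources/negative_images.csv \
#       --image_dir data/ILSVRC2012_val/ \
#       --label_dir data/val/ \
#       --subset_to_build all \
#       --tfrecord_output_dir /tmp/negative_tfrecords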
| [
"[email protected]"
] | |
bbbfc6df2ceef7fe88b55150129b9adcebb1bfbb | f5214006a587cf363ced0945af144d7c96ff65b7 | /resources/hoja_control.py | d0c12c624f99fd8a44f9e536ffa11e31ac384766 | [] | no_license | lcevallo/sistagua-api | 73fd985a3a98f0db9c8b192469aac1e6f356eb65 | 361bb9a2b0ba00a68d924ac675ca6a9f64f8d095 | refs/heads/main | 2023-04-17T09:36:14.000522 | 2021-05-08T09:39:00 | 2021-05-08T09:39:00 | 331,178,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,448 | py | import datetime
import re
from http import HTTPStatus
from flask import request
from flask_restful import Resource
import myconnutils
from models.ficha_tecnica import FichaTecnica
from models.hoja_control import HojaControl
from models.hoja_control_detalle import HojaControlDetalle
from models.temp.hoja_control_tmp import HojaControlTMP
class FichaTecnicaResource(Resource):
def get(self):
id = request.args.get('id')
ficha_tecnica = self.buscar_x_id(id)
if ficha_tecnica:
return ficha_tecnica
return {'message': 'Ficha tecnica no encontrada'}, 404
def post(self):
data = request.get_json()
ficha_tecnica_object = self.find_by_cedula(data['cedula'], data['codigo'])
if ficha_tecnica_object is not None:
return {'mensaje': 'la ficha tecnica para ese usuario ya existe en la base de datos'}
else:
ficha_tecnica_id = self.insert(data)
if ficha_tecnica_id:
ficha_tecnica_object = self.buscar_x_id(ficha_tecnica_id)
print(ficha_tecnica_object)
return {'ficha_tecnica': ficha_tecnica_object}, HTTPStatus.CREATED
@classmethod
def find_by_cedula(cls, cedula, codigo):
connection = myconnutils.getConnection()
cursor = connection.cursor()
# query = 'SELECT `tid`,`participant_id`,`firstname`,`lastname`,`email`,`token`,`usesleft` from
# `lime_tokens_782729` WHERE `token`= %s '
query = '''
SELECT
ficha_tecnica.*
FROM ficha_tecnica
INNER JOIN cliente_ficha
ON ficha_tecnica.fk_cliente = cliente_ficha.id
WHERE cliente_ficha.cedula = %s
AND ficha_tecnica.codigo = %s
AND ficha_tecnica.publish = TRUE
'''
cursor.execute(query, (cedula, codigo))
row = cursor.fetchone()
connection.close()
if row:
ficha_tecnica = FichaTecnica(
row['id'],
row['fk_cliente'],
row['codigo'],
row['tds'],
row['ppm'],
row['visitas'],
row['fecha_comprado'],
row['created_at'],
row['updated_at'],
row['publish']
)
return ficha_tecnica.data
else:
return None
@classmethod
def buscar_x_id(cls, id):
connection = myconnutils.getConnection()
cursor = connection.cursor()
query = """
SELECT
ficha_tecnica.*
FROM ficha_tecnica
WHERE ficha_tecnica.publish = TRUE
AND ficha_tecnica.id = %s
"""
cursor.execute(query, (id,))
row = cursor.fetchone()
connection.close()
if row:
ficha_tecnica = FichaTecnica(
row['id'],
row['fk_cliente'],
row['codigo'],
row['tds'],
row['ppm'],
row['visitas'],
row['fecha_comprado'],
row['created_at'],
row['updated_at'],
row['publish']
)
return ficha_tecnica.data
else:
return None
@classmethod
def insert(cls, valor):
fecha_comprado_format = datetime.datetime.strptime(valor['fecha_comprado'], "%Y-%m-%d")
connection = myconnutils.getConnection()
cursor = connection.cursor()
query_insert = """
INSERT INTO
`ficha_tecnica`(
`fk_cliente`,
`codigo`,
`tds`,
`ppm`,
`visitas`,
`fecha_comprado`)
VALUES(
%s,
%s,
%s,
%s,
%s,
%s)
"""
cursor.execute(query_insert, (
valor['fk_cliente'],
valor['codigo'],
valor['tds'],
valor['ppm'],
valor['visitas'],
fecha_comprado_format
)
)
connection.commit()
id_inserted = cursor.lastrowid
connection.close()
return id_inserted
@classmethod
def actualizar(cls, id, valor):
connection = myconnutils.getConnection()
cursor = connection.cursor()
query_update = """
UPDATE cliente_ficha
SET correo = %s,
nombre = %s,
apellidos = %s,
cedula = %s,
telefono = %s,
updated_at = CURRENT_TIMESTAMP()
WHERE
id = %s AND
publish = true
"""
cursor.execute(query_update, (
valor['correo'],
valor['nombre'],
valor['apellidos'],
valor['cedula'],
valor['telefono']
, id))
connection.commit()
print(cursor.rowcount, "record(s) affected")
connection.close()
@classmethod
def eliminar(cls, cedula):
connection = myconnutils.getConnection()
cursor = connection.cursor()
query_update = """UPDATE cliente_ficha
SET publish = false,
updated_at = CURTIME()
WHERE cedula = %s
"""
cursor.execute(query_update, (cedula,))
connection.commit()
print(cursor.rowcount, "record(s) affected logic deleted!")
connection.close()
class HojaControlResource(Resource):
def get(self, id):
hoja_control = self.buscar_x_id(id)
detalle_items = self.buscar_detalle_x_id(id)
return {'hoja_control': hoja_control, 'itemDetale': detalle_items}, HTTPStatus.OK
def delete(self, id):
hoja_control_response = self.buscar_x_id(id)
if hoja_control_response:
affected = self.borrarHojaControl(id)
if affected['hoja_control_id'] > 0:
return {'message': ''}, HTTPStatus.OK
else:
return {'message': f'No se pudo eliminar la hoja de control con id: {id}'}, HTTPStatus.BAD_REQUEST
else:
return {'message': f'Hoja de control con id:{id} no encontrada en la base'}, HTTPStatus.NOT_FOUND
@classmethod
def borrarHojaControl(cls, id):
query_delete_hoja_control = """
DELETE
FROM hoja_control
WHERE
id = %s
"""
query_delete_hoja_control_detalle_filtracion = """
DELETE
FROM hoja_control_detalle_filtracion
WHERE
fk_hoja_control_detalle IN (
SELECT
hoja_control_detalle.id
FROM hoja_control_detalle
WHERE hoja_control_detalle.fk_hoja_control = %s
)
"""
query_delete_hoja_control_detalle = """
DELETE
FROM hoja_control_detalle
WHERE
fk_hoja_control = %s
"""
connection = myconnutils.getConnection()
cursor = connection.cursor()
rows_afectada_detalle_filtracion = cursor.execute(query_delete_hoja_control_detalle_filtracion, (id,))
rows_afectada_detalle = cursor.execute(query_delete_hoja_control_detalle, (id,))
rows_afectada = cursor.execute(query_delete_hoja_control, (id,))
connection.commit()
connection.close()
return {'hoja_control_id': rows_afectada, 'ids_detalles': rows_afectada_detalle}
@classmethod
def getFechaFormateada(cls, fecha_no_formateada):
if fecha_no_formateada:
            fecha_comprado = re.search(r'\d{4}-\d{2}-\d{2}', fecha_no_formateada)
fecha_formateada = datetime.datetime.strptime(fecha_comprado.group(), '%Y-%m-%d').date()
else:
fecha_formateada = None
return fecha_formateada
@classmethod
def buscar_x_id(cls, id):
query = """
SELECT
hoja_control.id,
hoja_control.fk_cliente,
hoja_control.tipo_cliente,
hoja_control.codigo,
hoja_control.tds,
hoja_control.ppm,
hoja_control.visitas,
hoja_control.fecha_comprado,
hoja_control.created_at,
hoja_control.updated_at,
hoja_control.publish
FROM hoja_control
WHERE hoja_control.id = %s
"""
connection = myconnutils.getConnection()
cursor = connection.cursor()
cursor.execute(query, (id,))
row = cursor.fetchone()
connection.close()
if row:
hoja_control = HojaControl(
row['id'],
row['fk_cliente'],
row['tipo_cliente'],
row['codigo'],
row['tds'],
row['ppm'],
row['visitas'],
row['fecha_comprado'],
row['created_at'],
row['updated_at'],
row['publish']
)
return hoja_control.data
else:
return None
@classmethod
def buscar_detalle_x_id(cls, id):
query = """
SELECT
hoja_control_detalle.id,
hoja_control_detalle.fk_hoja_control,
hoja_control_detalle.factura,
hoja_control_detalle.fecha_mantenimiento,
hoja_control_detalle.recibo,
hoja_control_detalle.hoja_control,
hoja_control_detalle.descripcion,
hoja_control_detalle.persona_autoriza,
hoja_control_detalle.firma_url,
hoja_control_detalle.cedula_autoriza,
hoja_control_detalle.persona_dio_mantenimiento,
hoja_control_detalle.cedula_dio_mantenimiento,
hoja_control_detalle.ppm,
hoja_control_detalle.tds,
hoja_control_detalle.created_at,
hoja_control_detalle.updated_at,
hoja_control_detalle.publish
FROM hoja_control_detalle
WHERE
hoja_control_detalle.fk_hoja_control = %s
"""
connection = myconnutils.getConnection()
cursor = connection.cursor()
cursor.execute(query, (id,))
rows = cursor.fetchall()
connection.close()
data = []
for row in rows:
if row:
hoja_control_detalle = HojaControlDetalle(
row['id'],
row['fk_hoja_control'],
row['factura'],
row['fecha_mantenimiento'],
row['recibo'],
row['hoja_control'],
row['descripcion'],
row['persona_autoriza'],
row['firma_url'],
row['cedula_autoriza'],
row['persona_dio_mantenimiento'],
row['cedula_dio_mantenimiento'],
row['ppm'],
row['tds'],
row['created_at'],
row['updated_at'],
row['publish']
)
data.append(hoja_control_detalle.data)
return data
class HojasControlListResource(Resource):
def get(self):
hojas_control = self.traer_hojas_control()
return {'hojas_control': hojas_control}, HTTPStatus.OK
def post(self):
json_data = request.get_json()
detalle_items = json_data['detalle']
del json_data['detalle']
return self.guardar(json_data, detalle_items)
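    # A sketch of the JSON payload that post()/guardar() expect (field names are
    # taken from the code below; the sample values and overall shape are assumptions):
    #
    #   {
    #     "fk_cliente": 1, "tipo_cliente": 1, "codigo": "HC-001",
    #     "tds": 120, "ppm": 110, "visitas": 3, "fecha_comprado": "2021-05-08",
    #     "detalle": [
    #       {"factura": "...", "fecha_mantenimiento": "2021-06-01", "recibo": "...",
    #        "hoja_control": "...", "descripcion": "...", "persona_autoriza": "...",
    #        "firma_url": "...", "cedula_autoriza": "...",
    #        "persona_dio_mantenimiento": "...", "cedula_dio_mantenimiento": "...",
    #        "ppm": 100, "tds": 90, "filtraciones_list": []}
    #     ]
    #   }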
@classmethod
def traer_hojas_control(cls):
connection = myconnutils.getConnection()
cursor = connection.cursor()
query = """
SELECT
hoja_control.id,
hoja_control.codigo,
hoja_control.fk_cliente,
CASE hoja_control.tipo_cliente
WHEN 1 THEN (SELECT CONCAT_WS(' ',cn.nombre1, cn.nombre2, cn.apellido1, cn.apellido2) FROM cliente_natural cn WHERE cn.id= hoja_control.fk_cliente)
WHEN 2 THEN (SELECT CONCAT_WS(' ',ce.nombres) FROM cliente_empresarial ce WHERE ce.id= hoja_control.fk_cliente)
ELSE NULL
END as 'cliente',
hoja_control.tipo_cliente,
hoja_control.tds,
hoja_control.ppm,
hoja_control.visitas,
hoja_control.fecha_comprado
FROM hoja_control
"""
cursor.execute(query)
rows = cursor.fetchall()
connection.close()
data = []
for row in rows:
if row:
hoja_control_tmp = HojaControlTMP(
row['id'],
row['codigo'],
row['fk_cliente'],
row['cliente'],
row['tipo_cliente'],
row['tds'],
row['ppm'],
row['visitas'],
row['fecha_comprado']
)
data.append(hoja_control_tmp.data)
return data
@classmethod
def guardar(cls, hoja_control_json, hoja_control_detalle_json):
connection = myconnutils.getConnection()
cursor = connection.cursor()
fecha_comprado = cls.getFechaFormateada(hoja_control_json['fecha_comprado'])
query_hoja_control_insert = """
INSERT INTO hoja_control(fk_cliente, tipo_cliente, codigo, tds, ppm, visitas, fecha_comprado)
VALUES (%s, %s, %s, %s, %s, %s, %s)
"""
query_hoja_control_update = """
UPDATE hoja_control
SET fk_cliente = %s,
tipo_cliente = %s,
codigo = %s,
tds = %s,
ppm = %s,
visitas = %s,
fecha_comprado = %s,
updated_at = CURRENT_TIMESTAMP(),
publish = true
WHERE id = %s
"""
query_hoja_control_detalle_insert = """
INSERT INTO hoja_control_detalle (fk_hoja_control, factura, fecha_mantenimiento, recibo, hoja_control, descripcion, persona_autoriza, firma_url, cedula_autoriza, persona_dio_mantenimiento, cedula_dio_mantenimiento,ppm,tds)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s,%s)
"""
query_hoja_control_detalle_update = """
UPDATE hoja_control_detalle
SET factura = %s,
fecha_mantenimiento = %s,
recibo = %s,
hoja_control = %s,
descripcion = %s,
persona_autoriza = %s,
firma_url = %s,
cedula_autoriza = %s,
persona_dio_mantenimiento = %s,
cedula_dio_mantenimiento = %s,
ppm = %s,
tds = %s,
updated_at = CURRENT_TIMESTAMP()
WHERE id = %s
AND fk_hoja_control = %s
"""
query_hoja_control_detalle_delete = """
DELETE
FROM hoja_control_detalle
WHERE id = %s
AND fk_hoja_control = %s
"""
query_hoja_control_detalle_filtracion_delete = """
DELETE
FROM hoja_control_detalle_filtracion
WHERE
fk_hoja_control_detalle = %s
"""
if 'id' in hoja_control_json:
cursor.execute(query_hoja_control_update, (
hoja_control_json['fk_cliente'],
hoja_control_json['tipo_cliente'],
hoja_control_json['codigo'],
hoja_control_json['tds'],
hoja_control_json['ppm'],
hoja_control_json['visitas'],
fecha_comprado,
hoja_control_json['id']
)
)
id_hoja_control = hoja_control_json['id']
else:
cursor.execute(query_hoja_control_insert, (
hoja_control_json['fk_cliente'],
hoja_control_json['tipo_cliente'],
hoja_control_json['codigo'],
hoja_control_json['tds'],
hoja_control_json['ppm'],
hoja_control_json['visitas'],
fecha_comprado
)
)
id_hoja_control = cursor.lastrowid
insert_ids = []
connection.commit()
for row in hoja_control_detalle_json:
if row:
# if row['id'] is None or row['fk_hoja_control'] == 0:
                fk_detalle_hoja_control = -1
if 'id' in row:
                    valor_ppm = None
if 'ppm' in row:
valor_ppm = row['ppm']
                    valor_tds = None
if 'tds' in row:
valor_tds = row['tds']
cursor.execute(query_hoja_control_detalle_update,
(
(row['factura']).strip(),
cls.getFechaFormateada(row['fecha_mantenimiento']),
(row['recibo']).strip(),
(row['hoja_control']).strip(),
(row['descripcion']).strip(),
(row['persona_autoriza']).strip(),
(row['firma_url']).strip(),
(row['cedula_autoriza']).strip(),
(row['persona_dio_mantenimiento']).strip(),
(row['cedula_dio_mantenimiento']).strip(),
                                    valor_ppm,
                                    valor_tds,
row['id'],
row['fk_hoja_control']
)
)
insert_ids.append(row['id'])
fk_detalle_hoja_control = row['id']
else:
                    valor_ppm = None
if 'ppm' in row:
valor_ppm = row['ppm']
                    valor_tds = None
if 'tds' in row:
valor_tds = row['tds']
cursor.execute(query_hoja_control_detalle_insert,
(
id_hoja_control,
(row['factura']).strip(),
cls.getFechaFormateada(row['fecha_mantenimiento']),
(row['recibo']).strip(),
(row['hoja_control']).strip(),
(row['descripcion']).strip(),
(row['persona_autoriza']).strip(),
(row['firma_url']).strip(),
(row['cedula_autoriza']).strip(),
(row['persona_dio_mantenimiento']).strip(),
(row['cedula_dio_mantenimiento']).strip(),
valor_ppm,
valor_tds
)
)
insert_ids.append(cursor.lastrowid)
fk_detalle_hoja_control = cursor.lastrowid
connection.commit()
                # and here we start saving the filtration records
query_hoja_control_detalle_insert_filtracion="""
INSERT INTO hoja_control_detalle_filtracion (fk_hoja_control_detalle, fk_filtracion, valor_filtracion, descripcion)
VALUES (%s, %s, %s, %s)
"""
query_hoja_control_detalle_update_filtracion="""
UPDATE hoja_control_detalle_filtracion
SET fk_filtracion = %s,
valor_filtracion = %s,
descripcion = %s,
updated_at = CURRENT_TIMESTAMP()
WHERE id = %s
AND fk_hoja_control_detalle = %s
"""
                # extract the array of filtration records here
filtraciones_list_json = row['filtraciones_list']
for filtracion in filtraciones_list_json:
if filtracion['id']==0:
cursor.execute(query_hoja_control_detalle_insert_filtracion,
(
fk_detalle_hoja_control,
filtracion['fk_filtracion'],
filtracion['cantidad'],
(filtracion['descripcion']).strip()
)
)
else:
cursor.execute(query_hoja_control_detalle_update_filtracion,
(
filtracion['fk_filtracion'],
filtracion['cantidad'],
(filtracion['descripcion']).strip(),
filtracion['id'],
filtracion['fk_hoja_control_detalle']
)
)
connection.commit()
        # for the detail rows that were deleted on the client side
if 'deletedHojaControlItemIds' in hoja_control_json:
deleted_hoja_control_items_id = hoja_control_json['deletedHojaControlItemIds']
ids_borrar = deleted_hoja_control_items_id.split(',')
for id_hoja_control_detalle in ids_borrar:
cursor.execute(query_hoja_control_detalle_delete, (id_hoja_control_detalle, id_hoja_control))
cursor.execute(query_hoja_control_detalle_filtracion_delete, (id_hoja_control_detalle,))
connection.commit()
connection.close()
return {'id_hoja_control': id_hoja_control, 'ids_detalles': insert_ids}
@classmethod
def getFechaFormateada(cls, fecha_no_formateada):
if fecha_no_formateada:
            fecha_comprado = re.search(r'\d{4}-\d{2}-\d{2}', fecha_no_formateada)
fecha_formateada = datetime.datetime.strptime(fecha_comprado.group(), '%Y-%m-%d').date()
else:
fecha_formateada = None
return fecha_formateada
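# A minimal wiring sketch (not part of this module): how these resources would
# typically be registered with Flask-RESTful.  The route paths below are
# assumptions; the real ones live in the application entry point of sistagua-api.
#
#   from flask import Flask
#   from flask_restful import Api
#   from resources.hoja_control import (FichaTecnicaResource, HojaControlResource,
#                                       HojasControlListResource)
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(FichaTecnicaResource, '/ficha-tecnica')
#   api.add_resource(HojasControlListResource, '/hojas-control')
#   api.add_resource(HojaControlResource, '/hojas-control/<int:id>')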
| [
"[email protected]"
] | |
d2b9df7e94f38ee983c30d5363ebc16205dda68c | b3bb0d8e42e1a38e5cd84e64e9990bf05d63ddde | /ft/tests.py | 7ff0161f40a31368a807238d6d6df32b473c74ad | [] | no_license | vassily-la/obey01 | 7a9b2433f1b7c09a210ab77c108d3caa097f26e4 | 958badabf5549dd627150b5f94af13324dfd159b | refs/heads/master | 2020-03-08T20:38:43.552362 | 2018-04-09T17:59:52 | 2018-04-09T17:59:52 | 128,387,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,570 | py | import os, time
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.keys import Keys
MAX_WAIT = 10
class NewVisitorTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
staging_server = os.environ.get('STAGING_SERVER')
if staging_server:
self.live_server_url = 'http://' + staging_server
def tearDown(self):
self.browser.quit()
def wait_for_row_in_list_table(self, row_text):
start_time = time.time()
while True:
try:
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
return
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
def test_can_start_a_list_for_one_user(self):
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
self.browser.get(self.live_server_url)
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very
# methodical)
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Use peacock feathers to make a fly')
inputbox.send_keys(Keys.ENTER)
# The page updates again, and now shows both items on her list
self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# Satisfied, she goes back to sleep
def test_multiple_users_can_start_lists_at_different_urls(self):
# Edith starts a new to-do list
self.browser.get(self.live_server_url)
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Buy peacock feathers')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# She notices that her list has a unique URL
edith_list_url = self.browser.current_url
self.assertRegex(edith_list_url, '/lists/.+')
# Now a new user, Francis, comes along to the site.
## We use a new browser session to make sure that no information
## of Edith's is coming through from cookies etc
self.browser.quit()
self.browser = webdriver.Firefox()
# Francis visits the home page. There is no sign of Edith's
# list
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertNotIn('make a fly', page_text)
# Francis starts a new list by entering a new item. He
# is less interesting than Edith...
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Buy milk')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy milk')
# Francis gets his own unique URL
francis_list_url = self.browser.current_url
self.assertRegex(francis_list_url, '/lists/.+')
self.assertNotEqual(francis_list_url, edith_list_url)
# Again, there is no trace of Edith's list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertIn('Buy milk', page_text)
# Satisfied, they both go back to sleep
def test_layout_and_styling(self):
# Edith goes to the home page
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
# She notices the input box is nicely centered
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=10
)
# She starts a new list and sees the input is nicely
# centered there too
inputbox.send_keys('testing')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: testing')
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=10
)
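# Typical invocations (a sketch; the app label and staging host are placeholders):
#   python manage.py test ft                                      # against Django's test server
#   STAGING_SERVER=staging.example.com python manage.py test ft   # against a deployed site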
| [
"[email protected]"
] | |
77d43ac1bd2334701f9e03ccc7ded589d57e8c40 | 77c820198567519f8ce106c75000a03167942347 | /credentials.py | 0a0604a817841e7318db81318ae50f8c23ca0222 | [] | no_license | cmhoc/cmhocclerk | 6a3be10f89100104d3468e3c0354baddbdaa1917 | f973f57a28b731e2d474cb2c8077ef9813f423f7 | refs/heads/master | 2020-03-15T11:45:16.850778 | 2018-05-20T15:07:49 | 2018-05-20T15:07:49 | 132,127,321 | 0 | 1 | null | 2018-05-05T22:35:10 | 2018-05-04T10:43:11 | Python | UTF-8 | Python | false | false | 1,400 | py | #Authorization Credentials for Google Sheets and this specific bot
#Code made by google, modified by /u/thehowlinggreywolf
from __future__ import print_function
import os
from oauth2client.file import Storage
from oauth2client import client
from oauth2client import tools
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Rome Bot v1.5'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
CLIENT_SECRET_FILE)
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
        # parse oauth2client's standard command-line flags with their defaults
        # so run_flow() can complete the local browser flow
        flags = tools.argparser.parse_args(args=[])
        credentials = tools.run_flow(flow, store, flags)
print('Storing credentials to ' + credential_path)
return credentials | [
"[email protected]"
] | |
80257832860ad0a22b6782d158b13e16675b9141 | 339ee457c642bd7990d312b6e309dcf636a04c62 | /Codechef Lunchtime 2019/April/FASTFOOD.py | 1e4b67df2b0a4dbf3dd713a67bb9f08dbbc39643 | [] | no_license | RajatGupta2138/Programming-Practice | e8c6341172560845d62cae5467102e5fc92ae5b9 | ddef427396494ab9cb345f2c38c9bf52fda2911b | refs/heads/master | 2022-11-13T04:51:29.624023 | 2020-07-05T16:02:35 | 2020-07-05T16:02:35 | 277,070,055 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | for _ in range(int(input())):
    N = int(input())
    A = list(map(int, input().split()))
    B = list(map(int, input().split()))
    profit = sum(A)  # profit if every position keeps its value from A
    maxprofit = profit
    # try every split point from the right: positions i..N-1 take B instead of A
    for i in range(N - 1, -1, -1):
        profit = profit - A[i] + B[i]
        maxprofit = max(maxprofit, profit)
    print(maxprofit)
| [
"[email protected]"
] | |
0f972d9e866b624fe94d02c98fb1a139c1dfd6d9 | 4294eac3ddd33e3e14b8de0b8f5cea3be891bc0f | /.buildozer/android/platform/build-armeabi-v7a/build/other_builds/numpy-python3/armeabi-v7a__ndk_target_21/numpy/numpy/random/setup.py | 44af1806412da18376739329ce96f027f30d29b3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause",
"Python-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | VPetras/mobile-test-app | 9093a4bd3b17c9db96f08e413216753d2f0af263 | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | refs/heads/master | 2020-11-30T19:46:17.148262 | 2019-12-30T20:51:19 | 2019-12-30T20:51:19 | 230,459,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | from __future__ import division, print_function
from os.path import join
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
def needs_mingw_ftime_workaround():
# We need the mingw workaround for _ftime if the msvc runtime version is
# 7.1 or above and we build with mingw ...
# ... but we can't easily detect compiler version outside distutils command
# context, so we will need to detect in randomkit whether we build with gcc
msver = get_msvc_build_version()
if msver and msver >= 8:
return True
return False
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
if sys.platform == 'win32':
libs.append('Advapi32')
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
if sys.platform[:3] == "aix":
defs = [('_LARGE_FILES', None)]
else:
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1')]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = ['m']
# Configure mtrand
config.add_extension('mtrand',
sources=[join('mtrand', x) for x in
['mtrand.c', 'randomkit.c', 'initarray.c',
'distributions.c']]+[generate_libraries],
libraries=libs,
depends=[join('mtrand', '*.h'),
join('mtrand', '*.pyx'),
join('mtrand', '*.pxi'),],
define_macros=defs,
)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| [
"[email protected]"
] | |
65d79a8adef04fdbc70e590cb1025aa748e9e9a7 | eae50e84483d129d915ea62692ef34b9ac64af68 | /moss/core/migrations/0001_initial.py | 5ad773d1cb1eff1326054797c9e83c3ab9813c35 | [] | no_license | Elicarlos/ifpi-project | 785cba465a90bf93f0ac61674f990e1853d96fd5 | fb13556726fe4e2317223e88ed4fc53261667d04 | refs/heads/master | 2020-03-23T14:08:13.390179 | 2018-07-27T06:08:25 | 2018-07-27T06:08:25 | 141,658,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | # Generated by Django 2.0.6 on 2018-07-26 23:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Chamado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cnpj', models.CharField(max_length=17)),
('nome', models.CharField(max_length=50)),
('defeito', models.CharField(max_length=200)),
('nome_sistema', models.CharField(max_length=50)),
('data_abertura', models.DateTimeField(verbose_name='Data de Abertura de Chamdado')),
],
),
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cnpj', models.CharField(max_length=17)),
('razao_social', models.CharField(max_length=50)),
('fantasia', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Tecnico',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
],
),
]
| [
"[email protected]"
] | |
42ae24c5e27301e7c1af69885ee6fb72db779a79 | 89f1c415c744170e268f33c403214ecc197d8375 | /Chapter11/CNN_TDC_STRATEGY.py | 9f94cc245da8e221c319e855b5f9d7efe17e4548 | [
"MIT"
] | permissive | deadphilosopher/Artificial-Intelligence-By-Example | 620abc5e05689b537249ee84c3b0b6370ebd3ec1 | fbb5fb56c816d5cefefe2cdad0affcd7b3321081 | refs/heads/master | 2022-11-02T08:21:14.271368 | 2022-06-13T15:07:54 | 2022-06-13T15:07:54 | 193,830,647 | 0 | 0 | MIT | 2019-06-26T04:41:05 | 2019-06-26T04:41:05 | null | UTF-8 | Python | false | false | 2,418 | py | #Copyright 2018 Denis Rothman MIT License. See LICENSE.
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
import keras
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import model_from_json
from keras.models import load_model
from PIL import Image
import scipy.misc
#loads,traffic,food processing
A=['dataset_O/','dataset_traffic/','dataset/']
MS1=['loaded','jammed','productive']
MS2=['unloaded','change','gap']
display=1 #display images
scenario=0 #reference to A,MS1,MS2
directory=A[scenario] #transfer learning parameter (choice of images)
CRLMN=1 # concept learning
print("directory",directory)
#____________________LOAD MODEL____________________________
json_file = open(directory+'model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model=model_from_json(loaded_model_json)
#___________________ load weights into new model
loaded_model.load_weights(directory+"model/model.h5")
print("Strategy model loaded from training repository.")
# __________________compile loaded model
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
#___________________IDENTIFY IMAGE FUNCTION_____________________
def identify(target_image):
filename = target_image
original = load_img(filename, target_size=(64, 64))
#print('PIL image size',original.size)
if(display==1):
plt.imshow(original)
plt.show()
numpy_image = img_to_array(original)
arrayresized = scipy.misc.imresize(numpy_image, (64,64))
#print('Resized',arrayresized)
inputarray = arrayresized[np.newaxis,...] # extra dimension to fit model
#___________________PREDICTION___________________________
prediction1 = loaded_model.predict_proba(inputarray)
prediction2 = loaded_model.predict(inputarray)
print("image",target_image,"predict_proba:",prediction1,"predict:",prediction2)
return prediction1
#___________________SEARCH STRATEGY_____________________
s=identify(directory+'classify/img1.jpg')
s1=int(s[0])
if (int(s1)==0):
print('Classified in class A')
print(MS1[scenario])
print('Seeking...')
s=identify(directory+'classify/img2.jpg')
s1=int(s[0])
if (int(s1)==1):
print('Classified in class B')
print(MS2[scenario])
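# Expected layout of each dataset directory listed in A (inferred from the paths
# used above; anything not referenced by the script is an assumption):
#
#   dataset/              (or dataset_O/, dataset_traffic/)
#     model/model.json    serialized Keras architecture
#     model/model.h5      trained weights
#     classify/img1.jpg   first image submitted to the search strategy
#     classify/img2.jpg   image checked when img1 falls into class A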
| [
"[email protected]"
] | |
fefc9585e3a091c97ccd12fa459892e979c1dbf5 | 55d82976caaad6ef095b8a43cf4edd73f7bd54f3 | /4_django/board/board/settings.py | e8d169ff69c4017df6ff44e41595c4d30baa2ddc | [] | no_license | asy0239/TIL | bfa2bf2856bd534777edc1cdf53465cc1f78784a | 8493cf08cca43c5b0947a3469bae9a6d057c760a | refs/heads/master | 2020-05-27T19:18:53.910448 | 2019-06-18T07:37:42 | 2019-06-18T07:37:42 | 188,759,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | """
Django settings for board project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#f9ksn&9$y_lri2sw2vg)vc=l$5lz0)f#6=)!r-(n3i9vfl9($'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'articles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'board.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'board.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
88c2a025bf018c2548daf79a448628a19daa9545 | 8c522f3f1116882c9a83db8faa6730a154b3d212 | /python/Clicker.py | 1181580587abce3d3cf2c986af2c7a8ed77b86a1 | [] | no_license | jaredad7/CSC_BACKUPS | c6efb86edcfa234f765e2dd51288d101a616a980 | 271652edac1f1783922782d37b519a71be90a868 | refs/heads/master | 2020-03-21T09:27:57.993691 | 2018-06-23T13:03:02 | 2018-06-23T13:03:02 | 138,401,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # Click Counter
# Demonstrates binding an event with an event handler
from tkinter import *
class Application(Frame):
def __init__(self, master):
super(Application, self).__init__(master)
self.grid()
self.button_clicks = 0 #Number of times the button has been clicked
self.create_widget()
def create_widget(self):
self.bttn = Button(self)
self.bttn["text"] = "Total Clicks: 0"
self.bttn["command"] = self.update_count
self.bttn.grid()
def update_count(self):
self.button_clicks += 1
self.bttn["text"] = "Total Clicks: " + str(self.button_clicks)
#main
root = Tk()
root.title("Click Counter")
root.geometry("200x50")
app = Application(root)
root.mainloop()
| [
"[email protected]"
] | |
0d253c67c05313df25a84c93f04f1a0f4bb03981 | d7ecb49576e6b0e2582a7473ac3e9712b23adc83 | /datasets/transforms.py | 4016b6d0bd18b110da7fc23e02e74fb163f22858 | [
"MIT",
"Apache-2.0"
] | permissive | Tarandro/MOTR_4 | 9a6fe6928ddf2de40d2ceabdb3ffd0d791ac3bd9 | bd8e53d7ea0584f06ccf032b056b327c87986ca7 | refs/heads/master | 2023-08-03T03:31:53.804147 | 2021-09-24T21:19:12 | 2021-09-24T21:19:12 | 408,173,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,758 | py | # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Transforms and data augmentation for both image + bbox.
"""
import copy
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
import numpy as np
import os
def crop_mot(image, image2, target, region):
cropped_image = F.crop(image, *region)
cropped_image2 = F.crop(image2, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if 'obj_ids' in target:
fields.append('obj_ids')
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
for i, box in enumerate(cropped_boxes):
l, t, r, b = box
if l < 0:
l = 0
if r < 0:
r = 0
if l > w:
l = w
if r > w:
r = w
if t < 0:
t = 0
if b < 0:
b = 0
if t > h:
t = h
if b > h:
b = h
cropped_boxes[i] = torch.tensor([l, t, r, b], dtype=box.dtype)
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target['masks'] = target['masks'][:, i:i + h, j:j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target['boxes'].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target['masks'].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, cropped_image2, target
def random_shift(image, image2, target, region, sizes):
oh, ow = sizes
# step 1, shift crop and re-scale image firstly
cropped_image = F.crop(image, *region)
cropped_image = F.resize(cropped_image, sizes)
cropped_image2 = F.crop(image2, *region)
cropped_image2 = F.resize(cropped_image2, sizes)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if 'obj_ids' in target:
fields.append('obj_ids')
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
for i, box in enumerate(cropped_boxes):
l, t, r, b = box
if l < 0:
l = 0
if r < 0:
r = 0
if l > w:
l = w
if r > w:
r = w
if t < 0:
t = 0
if b < 0:
b = 0
if t > h:
t = h
if b > h:
b = h
# step 2, re-scale coords secondly
ratio_h = 1.0 * oh / h
ratio_w = 1.0 * ow / w
cropped_boxes[i] = torch.tensor([ratio_w * l, ratio_h * t, ratio_w * r, ratio_h * b], dtype=box.dtype)
cropped_boxes = cropped_boxes.reshape(-1, 2, 2)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target['masks'] = target['masks'][:, i:i + h, j:j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target['boxes'].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target['masks'].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, cropped_image2, target
def crop(image, image2, target, region):
cropped_image = F.crop(image, *region)
cropped_image2 = F.crop(image2, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if 'obj_ids' in target:
fields.append('obj_ids')
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target['masks'] = target['masks'][:, i:i + h, j:j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target['boxes'].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target['masks'].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, cropped_image2, target
def hflip(image, image2, target):
flipped_image = F.hflip(image)
flipped_image2 = F.hflip(image2)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
if "masks" in target:
target['masks'] = target['masks'].flip(-1)
return flipped_image, flipped_image2, target
def resize(image, image2, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
rescaled_image2 = F.resize(image2, size)
if target is None:
return rescaled_image, rescaled_image2, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target['masks'] = interpolate(
target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
return rescaled_image, rescaled_image2, target
def pad(image, image2, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
padded_image2 = F.pad(image2, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, padded_image2, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image[::-1])
if "masks" in target:
target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
return padded_image, padded_image2, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, img2, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, img2, target, region)
class MotRandomCrop(RandomCrop):
def __call__(self, imgs: list, imgs2: list, targets: list):
ret_imgs = []
ret_imgs2 = []
ret_targets = []
region = T.RandomCrop.get_params(imgs[0], self.size)
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
img_i, img_i2, targets_i = crop(img_i, img_i2, targets_i, region)
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class FixedMotRandomCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, imgs: list, imgs2: list, targets: list):
ret_imgs = []
ret_imgs2 = []
ret_targets = []
w = random.randint(self.min_size, min(imgs[0].width, self.max_size))
h = random.randint(self.min_size, min(imgs[0].height, self.max_size))
region = T.RandomCrop.get_params(imgs[0], [h, w])
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
img_i, img_i2, targets_i = crop_mot(img_i, img_i2, targets_i, region)
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class MotRandomShift(object):
def __init__(self, bs=1):
self.bs = bs
def __call__(self, imgs: list, imgs2: list, targets: list):
ret_imgs = copy.deepcopy(imgs)
ret_imgs2 = copy.deepcopy(imgs2)
ret_targets = copy.deepcopy(targets)
n_frames = len(imgs)
select_i = random.choice(list(range(n_frames)))
w, h = imgs[select_i].size
xshift = (100 * torch.rand(self.bs)).int()
xshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1
yshift = (100 * torch.rand(self.bs)).int()
yshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1
ymin = max(0, -yshift[0])
ymax = min(h, h - yshift[0])
xmin = max(0, -xshift[0])
xmax = min(w, w - xshift[0])
region = (int(ymin), int(xmin), int(ymax-ymin), int(xmax-xmin))
ret_imgs[select_i], ret_imgs2[select_i], ret_targets[select_i] = random_shift(imgs[select_i], imgs2[select_i], targets[select_i], region, (h,w))
return ret_imgs, ret_imgs2, ret_targets
class FixedMotRandomShift(object):
def __init__(self, bs=1, padding=50):
self.bs = bs
self.padding = padding
def __call__(self, imgs: list, imgs2: list, targets: list):
ret_imgs = []
ret_imgs2 = []
ret_targets = []
n_frames = len(imgs)
w, h = imgs[0].size
xshift = (self.padding * torch.rand(self.bs)).int() + 1
xshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1
yshift = (self.padding * torch.rand(self.bs)).int() + 1
yshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1
ret_imgs.append(imgs[0])
ret_imgs2.append(imgs2[0])
ret_targets.append(targets[0])
for i in range(1, n_frames):
ymin = max(0, -yshift[0])
ymax = min(h, h - yshift[0])
xmin = max(0, -xshift[0])
xmax = min(w, w - xshift[0])
prev_img = ret_imgs[i-1].copy()
prev_img2 = ret_imgs2[i - 1].copy()
prev_target = copy.deepcopy(ret_targets[i-1])
region = (int(ymin), int(xmin), int(ymax - ymin), int(xmax - xmin))
img_i, img_i2, target_i = random_shift(prev_img, prev_img2, prev_target, region, (h, w))
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(target_i)
return ret_imgs, ret_imgs2, ret_targets
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, img2: PIL.Image.Image, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, img2, target, region)
class MotRandomSizeCrop(RandomSizeCrop):
def __call__(self, imgs, imgs2, targets):
w = random.randint(self.min_size, min(imgs[0].width, self.max_size))
h = random.randint(self.min_size, min(imgs[0].height, self.max_size))
region = T.RandomCrop.get_params(imgs[0], [h, w])
ret_imgs = []
ret_imgs2 = []
ret_targets = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
img_i, img_i2, targets_i = crop(img_i, img_i2, targets_i, region)
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, img2, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
return crop(img, img2, target, (crop_top, crop_left, crop_height, crop_width))
class MotCenterCrop(CenterCrop):
def __call__(self, imgs, imgs2, targets):
image_width, image_height = imgs[0].size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
ret_imgs = []
ret_imgs2 = []
ret_targets = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
img_i, img_i2, targets_i = crop(img_i, img_i2, targets_i, (crop_top, crop_left, crop_height, crop_width))
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, img2, target):
if random.random() < self.p:
return hflip(img, img2, target)
return img, img2, target
class MotRandomHorizontalFlip(RandomHorizontalFlip):
def __call__(self, imgs, imgs2, targets):
if random.random() < self.p:
ret_imgs = []
ret_imgs2 = []
ret_targets = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
img_i, img_i2, targets_i = hflip(img_i, img_i2, targets_i)
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
return imgs, imgs2, targets
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, img2, target=None):
size = random.choice(self.sizes)
return resize(img, img2, target, size, self.max_size)
class MotRandomResize(RandomResize):
def __call__(self, imgs, imgs2, targets):
size = random.choice(self.sizes)
ret_imgs = []
ret_imgs2 = []
ret_targets = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
img_i, img_i2, targets_i = resize(img_i, img_i2, targets_i, size, self.max_size)
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class RandomPad(object):
def __init__(self, max_pad):
self.max_pad = max_pad
def __call__(self, img, img2, target):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, img2, target, (pad_x, pad_y))
class MotRandomPad(RandomPad):
def __call__(self, imgs, imgs2, targets):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
ret_imgs = []
ret_imgs2 = []
ret_targets = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
            img_i, img_i2, targets_i = pad(img_i, img_i2, targets_i, (pad_x, pad_y))
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, img2, target):
if random.random() < self.p:
return self.transforms1(img, img2, target)
return self.transforms2(img, img2, target)
class MotRandomSelect(RandomSelect):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __call__(self, imgs, imgs2, targets):
if random.random() < self.p:
return self.transforms1(imgs, imgs2, targets)
return self.transforms2(imgs, imgs2, targets)
class ToTensor(object):
def __call__(self, img, img2, target):
return F.to_tensor(img), F.to_tensor(img2), target
class MotToTensor(ToTensor):
def __call__(self, imgs, imgs2, targets):
ret_imgs = []
ret_imgs2 = []
for img in imgs:
ret_imgs.append(F.to_tensor(img))
for img2 in imgs2:
ret_imgs2.append(F.to_tensor(img2))
return ret_imgs, ret_imgs2, targets
class RandomErasing(object):
def __init__(self, *args, **kwargs):
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, img2, target):
return self.eraser(img), self.eraser(img2), target
class MotRandomErasing(RandomErasing):
def __call__(self, imgs, imgs2, targets):
        # TODO: Rewrite this part to ensure the data augmentation is the same for each image.
ret_imgs = []
ret_imgs2 = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
ret_imgs.append(self.eraser(img_i))
ret_imgs2.append(self.eraser(img_i2))
return ret_imgs, ret_imgs2, targets
class MoTColorJitter(T.ColorJitter):
def __call__(self, imgs, imgs2, targets):
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
ret_imgs = []
ret_imgs2 = []
for img_i, img_i2, targets_i in zip(imgs, imgs2, targets):
ret_imgs.append(transform(img_i))
ret_imgs2.append(transform(img_i2))
        return ret_imgs, ret_imgs2, targets
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, image2, target=None):
if target is not None:
target['ori_img'] = image.clone()
image = F.normalize(image, mean=self.mean, std=self.std)
image2 = F.normalize(image2, mean=self.mean, std=self.std)
if target is None:
return image, image2, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, image2, target
class MotNormalize(Normalize):
def __call__(self, imgs, imgs2, targets=None):
ret_imgs = []
ret_imgs2 = []
ret_targets = []
for i in range(len(imgs)):
img_i = imgs[i]
img_i2 = imgs2[i]
targets_i = targets[i] if targets is not None else None
img_i, img_i2, targets_i = super().__call__(img_i, img_i2, targets_i)
ret_imgs.append(img_i)
ret_imgs2.append(img_i2)
ret_targets.append(targets_i)
return ret_imgs, ret_imgs2, ret_targets
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, image2, target):
for t in self.transforms:
image, image2, target = t(image, image2, target)
return image, image2, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class MotCompose(Compose):
def __call__(self, imgs, imgs2, targets):
for t in self.transforms:
imgs, imgs2, targets = t(imgs, imgs2, targets)
return imgs, imgs2, targets
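# Illustrative only (not part of the original file): the Mot* variants are meant
# to be chained with MotCompose so that every frame of a clip receives the same
# augmentation, e.g.
#
#     train_transforms = MotCompose([
#         MotRandomHorizontalFlip(),
#         MotRandomResize([608, 640, 672], max_size=1333),
#         MotToTensor(),
#         MotNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
#     ])
#     imgs, imgs2, targets = train_transforms(imgs, imgs2, targets)
#
# The resize sizes and normalization statistics above are assumed values for this sketch.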
| [
"[email protected]"
] | |
dfef6512a4ea340a12e92ce730d9052f302614d1 | c7bca86a776c3f1a267fc8bad37b1badb3985a42 | /scraper.py | 585e027ab419692015761038bf3afd1c9c58aa78 | [] | no_license | taenin/Riot-Games-NURF-Project | 59df22e13c9cd630fff98e8cffa4d6046d978ddb | e7dab5e0d079168b65c0b882bacc2646689db65f | refs/heads/master | 2021-01-10T19:14:12.640683 | 2015-04-11T00:20:00 | 2015-04-11T00:20:00 | 33,195,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import timers
import time
import sys
import updater
def main(args):
startTime = int(args[1])
mainWorker = updater.Worker(startTime)
comThread = timers.BackgroundUpdater(1.5, mainWorker.updateInformation).start()
try:
while True:
time.sleep(1)
except:
print "Exiting ..."
if __name__ == '__main__':
main(sys.argv)
| [
"[email protected]"
] | |
a944005aa57a366320289b4317d4667ee2b9e3e1 | f2a2f7eb7a8819365909bcf84f258273a4f1e086 | /mlp/errors.py | 712fe594949595e97fb61ce0b8c6d2f9cbcec120 | [] | no_license | gracecxj/mlpractical | 73398ba0de8b67de47a93f955772622ff23cde12 | 24cf59b04e7d106f143fdb84573639f1e39bd628 | refs/heads/mlp2017-8/master | 2021-07-20T05:17:01.758744 | 2017-09-29T17:00:52 | 2017-09-29T17:00:52 | 106,006,944 | 1 | 0 | null | 2017-10-06T13:19:57 | 2017-10-06T13:19:57 | null | UTF-8 | Python | false | false | 1,452 | py | # -*- coding: utf-8 -*-
"""Error functions.
This module defines error functions, with the aim of model training being to
minimise the error function given a set of inputs and target outputs.
The error functions will typically measure some concept of distance between the
model outputs and target outputs, averaged over all data points in the data set
or batch.
"""
import numpy as np
class SumOfSquaredDiffsError(object):
"""Sum of squared differences (squared Euclidean distance) error."""
def __call__(self, outputs, targets):
"""Calculates error function given a batch of outputs and targets.
Args:
outputs: Array of model outputs of shape (batch_size, output_dim).
targets: Array of target outputs of shape (batch_size, output_dim).
Returns:
Scalar error function value.
"""
raise NotImplementedError()
def grad(self, outputs, targets):
"""Calculates gradient of error function with respect to outputs.
Args:
outputs: Array of model outputs of shape (batch_size, output_dim).
targets: Array of target outputs of shape (batch_size, output_dim).
Returns:
Gradient of error function with respect to outputs. This should be
an array of shape (batch_size, output_dim).
"""
raise NotImplementedError()
def __repr__(self):
return 'SumOfSquaredDiffsError'
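# For illustration only (not part of the coursework skeleton): assuming the error
# is half the squared Euclidean distance averaged over the batch, a concrete
# implementation of the two methods above could look like
#
#     def __call__(self, outputs, targets):
#         return 0.5 * np.mean(np.sum((outputs - targets) ** 2, axis=1))
#
#     def grad(self, outputs, targets):
#         return (outputs - targets) / outputs.shape[0]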
| [
"[email protected]"
] | |
5c53c405c769ad6e96eb92359df716a95fa1a8c6 | 02de7815b28c3ea7c311365f9ea3fcd64a35da4b | /tests/tests.py | 30f06a85495130f4bed585b5f8316c2dd6c067e4 | [] | no_license | cheeseywhiz/pyt | 73499b3a734c2702b7082fffd571da1224d2a006 | 73ffa77ac347fe6b4c9a00e5ef12b4a0d29ee7ec | refs/heads/master | 2020-03-25T06:15:31.495862 | 2019-01-31T21:41:24 | 2019-01-31T21:41:24 | 143,491,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | from chars import *
def join(*lines):
return ''.join(lines)
def escape_command(seq):
return ESC + seq
def control_sequence(final_byte, *args):
body = SEP.join(map(str, args))
return escape_command(f'{CSI}{body}{final_byte}')
def cursor_character_absolute(nth_col=None):
if nth_col is None:
nth_col = 0
return control_sequence(CHA, nth_col + 1)
def cursor_forward_tabulation(n_tabs=None):
if n_tabs is None:
n_tabs = 1
return control_sequence(CHT, n_tabs)
def tabuation_clear(selection=None):
if selection is None:
selection = 0
return control_sequence(TBC, selection)
def character_tabulation_set():
return escape_command(HTS)
def line_position_absolute(nth_line=None):
if nth_line is None:
nth_line = 1
return control_sequence(VPA, nth_line)
def reset_to_initial_state():
return escape_command(RIS)
| [
"[email protected]"
] | |
270dfd7fcb19c5dee5f5d2c6f5ec30f802192865 | 83586573a57401d6f35c88b9753efe65afcae612 | /hw1/exp1_2.py | 0a85262d203bdfc4aeabfeaaf57e6ec1277b32ca | [] | no_license | wcLegend/data_mining | 0f96f25a9761f9b6df4337e31c7f7020b774b059 | 59c888f43a7e52a0f77b8972c4a9fe08db6d9a7c | refs/heads/master | 2020-07-29T08:56:01.643618 | 2019-12-24T02:06:11 | 2019-12-24T02:06:11 | 209,737,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,174 | py | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce
dimensionality and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
import warnings
warnings.filterwarnings("ignore")
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans,AffinityPropagation,MeanShift,SpectralClustering,AgglomerativeClustering,DBSCAN
from sklearn.mixture import GaussianMixture
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.") #使用潜在语义分析对文档进行预处理
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).") #使用普通的k均值算法(在批处理模式下)
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.") #禁用TF-IDF加权
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
def document_clustering(estimator,name,X):
#print("Clustering sparse data with %s" % estimator)
t0 = time()
    estimator.fit(X.toarray())  # toarray() works too, since SVD dimensionality reduction has already been applied
#print("done in %0.3fs" % (time() - t0))
#print()
if hasattr(estimator, 'labels_'):
y_pred = estimator.labels_
else:
y_pred = estimator.predict(X)
print('%-9s\t\t\t%.2fs\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0),
metrics.normalized_mutual_info_score(labels, y_pred),
metrics.homogeneity_score(labels, y_pred),
metrics.completeness_score(labels, y_pred),
metrics.v_measure_score(labels, y_pred),
metrics.adjusted_rand_score(labels, y_pred),
metrics.adjusted_mutual_info_score(labels, y_pred,
average_method='arithmetic')))
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset "
"using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', alternate_sign=False,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
alternate_sign=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
# #############################################################################
# Do the actual clustering
print(82 * '_')
print('init\t\t\t\ttime\tNMI\t\thomo\tcompl\tv-meas\tARS\t\tAMI')
mbkm = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
document_clustering(mbkm,'MBKMeans',X)
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
document_clustering(km,'KMeans',X)
af = AffinityPropagation(preference=-50,convergence_iter = 10,verbose=opts.verbose) # performs very poorly
document_clustering(af,'AfPro',X)
ms = MeanShift(bandwidth=2)
document_clustering(ms,'MeanS',X)
whc = AgglomerativeClustering(n_clusters=4,linkage='ward')
document_clustering(whc,'Ward/Aggclu',X)
chc = AgglomerativeClustering(n_clusters=4,linkage='complete')
document_clustering(chc,'comp/Aggclu',X)
ahc = AgglomerativeClustering(n_clusters=4,linkage='average')
document_clustering(ahc,'ave/Aggclu',X)
shc = AgglomerativeClustering(n_clusters=4,linkage='single') # performs very poorly
document_clustering(shc,'sin/Aggclu',X)
sc = SpectralClustering(n_clusters=4, eigen_solver='arpack', affinity="nearest_neighbors")
document_clustering(sc,'SpeClu',X)
db = DBSCAN() # performs very poorly; with SVD reduced to K=100 components the results are acceptable
document_clustering(db,'DBSCAN',X)
gau = GaussianMixture(n_components = 4, covariance_type='full')
document_clustering(gau,'GauMix',X)
"""
print("NMI: %0.3f"%metrics.normalized_mutual_info_score(labels, km.labels_))
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print()
"""
"""
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
""" | [
"[email protected]"
] | |
07322d8ea7f676088aa2cbbcdaf68b58ce468bf3 | e6ce5cfa880637c651580eaf013916fb915ad019 | /TR2_RP/src/rp_mlp_loo.py | 007ba952819ad404c36c7071624891bef884640e | [] | no_license | faellacurcio/rp | f3c760deff16a411ed55121baf2a4741934aed36 | a23d31f12382d36a5e2b03a39f7a6800a7194566 | refs/heads/master | 2020-05-03T11:56:42.253345 | 2019-06-18T12:48:29 | 2019-06-18T12:48:29 | 178,612,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | #importa as bibliotecas
import numpy
from sklearn.model_selection import LeaveOneOut
from sklearn.neural_network import MLPClassifier
from plot_confusion_matrix import plot_confusion_matrix
import matplotlib.pyplot as plt
def fix_Y(vec_input):
mat = numpy.zeros([vec_input.shape[0], int(numpy.amax(vec_input))])
    for idx, element in enumerate(vec_input):
mat[idx][int(element)-1] = 1
return mat
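# e.g. fix_Y(numpy.array([1., 3.])) returns [[1, 0, 0], [0, 0, 1]]
# (one-hot encoded rows, assuming the classes are labelled 1..K)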
samples = list()
# Open and read the dermatology file as a dataset
with open('dermatology.dat') as iris:
for row in iris.readlines():
samples.append(list(map(float, row.split(","))))
dataset = numpy.array(samples)
# split the data into features and class labels (X/Y)
X = dataset[:,:-1]
Y = dataset[:,dataset.shape[1]-1]
Y = fix_Y(Y)
# Normalization function (z-score)
def zscore(X):
X = X - numpy.mean(X, axis=0)
X = X / numpy.std(X, axis=0, ddof=1)
return X
# X = zscore(X)
clf = MLPClassifier(hidden_layer_sizes=(50,), max_iter=100, alpha=1e-4,
solver='lbfgs', verbose=0, tol=1e-2, random_state=1,
learning_rate_init=.1)
confusion_y_test = []
confusion_y_pred = []
# Split the samples using leave-one-out
cross_val = LeaveOneOut()
cross_val.get_n_splits(X)
total = len(X)
success = 0.0
# loop that runs classification over the different validation splits
for train_index, test_index in cross_val.split(X,Y):
    # split into training and test samples
X_train, X_test = X[train_index], X[test_index]
Y_train, Y_test = Y[train_index], Y[test_index]
    # classify the test-set samples
clf.fit(X_train, Y_train)
y = clf.predict(X_test)
    # accumulate the number of correct predictions
y = numpy.argmax(y,axis=1)
Y_test = numpy.argmax(Y_test,axis=1)
confusion_y_test.extend(Y_test)
confusion_y_pred.extend(y)
success += sum(y == Y_test)
# compute and print the result.
result = 100*(success/total)
print('%.2f %%' % (result))
plot_confusion_matrix(confusion_y_test, confusion_y_pred, classes=["class 1", "class 2", "class 3", "class 4", "class 5", "class 6"],
title='MLP, 100 neurons, Leave-one-out: '+str('%.2f' % result)+'%')
plt.savefig("q3_mlp_loo_notNorm")
plt.show()
plt.clf() | [
"[email protected]"
] | |
a3f1d4931d6678a9032dead8c990899a2745e000 | 969b7d3c27e0d29ef994616fd1b6c50966168835 | /sprint.py | 1c8a8d8e24bcff5d60251e6b79bde5d220313265 | [] | no_license | liamh17/Python_Files | d8f9e8ed726f184167f1d4af3ea370e0127cdb48 | 968297ab2b25e4fef9c5538f54d51924c3f4f4e4 | refs/heads/master | 2020-12-24T20:24:17.109795 | 2016-06-02T03:22:44 | 2016-06-02T03:22:44 | 58,249,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | import msvcrt
import time
high_score = 50
name = "no-one"
while True:
distance = int(0)
print("\n--------------------------------------------------------------")
print('\n\nWelcome to the 100m sprint, tap z and x rapidly to move!')
print('* = 10m')
print("\n**Current record: " + str(high_score) + "s, by: " + name)
print('\nPress enter to start')
input()
print('Ready...')
time.sleep(1)
print('GO!')
start_time = time.time()
while distance < 100:
k1 = input()
k1 = str(k1)
if k1 == 'z':
k2 = input()
k2 = str(k2)
if k2 == 'x':
distance += 1
if distance == 50:
print("* You're halfway there!")
elif distance % 10 == 0:
print('*')
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
print('Well done you did it in...')
print(fin_time)
if fin_time < high_score:
print("Well done you've got a new high score ")
name = input("Please enter your name : ")
high_score = fin_time
| [
"Liam Heisler"
] | Liam Heisler |
d80efee932216f2f43dc9d15c1826b6099035c05 | ccf7b194ecdff46f02a9e280511709d716d311aa | /ecommercedev/ecommercebd/ecom/views.py | ac9695321f7340532297df91c5772f1570555480 | [] | no_license | imon91/test12 | 24a505a5545eca217436cf6ccff7e54acaca4d30 | 0c28a50f0b66be5c717f5ad28d8e189fbe21b002 | refs/heads/master | 2020-12-17T22:44:04.640363 | 2020-01-21T09:37:39 | 2020-01-21T09:37:39 | 235,298,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | import datetime
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.shortcuts import get_object_or_404
from django.contrib.auth import login as auth_login, logout as auth_logout, authenticate
# Admin Dashboard
from ecom.models import Order
from ecomadminapp.models import ProductCategory
from ecomadminapp.models import Product
from registration.models import CompanyRegistration
def index(request):
context = dict()
context['products'] = Product.objects.filter(is_active=True).order_by('-id')
return render(request, 'ecom/display.html', context)
def product_detail(request, id):
context = dict()
context['product_details'] = get_object_or_404(Product, pk=id)
return render(request, 'ecom/product_detail.html', context)
def addcart(request):
product = Product.objects.get(pk=request.GET.get('product_id'))
quantity = request.GET.get('quantity')
request.session['product'] = "product"
request.session['quantity'] = "quantity"
return HttpResponse("Added cart successfully.. ! ")
| [
"[email protected]"
] | |
515ab8d53a5ddeadafdaff27d99130f0b0011aa6 | 682a4ab5047a54c56c4585e768afacb209eef42e | /mail_message.py | e0a88a3f3f183632a25056403cbd285f258a4ca1 | [] | no_license | rheami/myMail | 7fbf245250d57bd82ec4eb5950a89896e36df509 | 5a7a193c4cd4751150eaef53c3320e40f98e0d46 | refs/heads/master | 2022-12-26T03:43:46.179053 | 2018-08-13T18:45:54 | 2018-08-13T18:45:54 | 302,132,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,949 | py | # -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def html_escape(text):
return "".join(html_escape_table.get(c,c) for c in text)
class MailMessage(models.Model):
_inherit = 'mail.message'
@api.multi
def on_message_forward(self):
context = dict(self._context or {})
if context['option'] == 'forward':
subject = [_("Fwd")]
header = [
"----------" + _("Forwarded message") + "----------",
_("From: ") + self.email_from,
_("Date: ") + self.date,
]
else:
if context['option'] == 'reply':
subject = [_("Re")]
header = [
"----------" + _("Replyed message") + "----------",
_("From: ") + self.email_from,
_("Date: ") + self.date,
]
if self.record_name and self.parent_id:
subject.append(self.record_name)
if self.subject:
subject.append(self.subject)
else:
if len(subject) < 2:
subject.append(_("No subject"))
if self.subject:
header.append(_("Subject: ") + self.subject)
header = '<br/>'.join(html_escape(s) for s in header)
context = {
'default_parent_id': self.id,
'default_body':
"<p><i>" + header + "</i></p><br/>" +
self.body,
'default_attachment_ids': self.attachment_ids.ids,
'default_partner_ids': self.partner_ids.ids,
'mail_post_autofollow': True,
'mail_post_autofollow_partner_ids': self.partner_ids.ids,
}
# private message: no model, no res_id
is_private = False
if not self.model or not self.res_id:
is_private = True
context["is_private"] = is_private
if self.model:
context["default_model"] = self.model
if self.res_id:
context["default_res_id"] = self.res_id
if self.model and self.res_id:
context["default_destination_object_id"] = ",".join([self.model, str(self.res_id)])
action = self.env['ir.actions.act_window'].for_xml_id('mail_forward', 'compose_action')
action['context'] = context
return action
@api.multi
@api.depends('body')
def _get_body(self):
for message in self:
mybody = u"<hr/>" + message.body + u"<hr/>"
url = None
if message.res_id:
url = '#id=%s&model=%s&view_type=form' % (
message.res_id,
message.model
)
title = _("Associated Model: ")
url = u'<p><b> %s</b><a href="%s">%s</a></p>' % (title, url, message.record_name)
mybody = mybody + url
message.mybody = mybody
@api.multi
@api.depends('body')
def _get_description_short(self):
for message in self:
truncated_text = self.env["ir.fields.converter"].text_from_html(
message.body, 40, 100)
url = None
if message.res_id:
url = '#id=%s&model=%s&view_type=form' % (
message.res_id,
message.model
)
about = message.about
if url:
about = '<a href="%s">%s</a>' % (url, about)
message.short_description = "<h4 class \"oe_msg_title\">" + about + "</h4>" + u": " + truncated_text
@api.multi
@api.depends('res_id')
def _get_model_url(self):
res = {}
for message in self:
url = None
if message.res_id:
url = '#id=%s&model=%s&view_type=form' % (
message.res_id,
message.model
)
title = _("Associated Model: ")
message.url = '<p><b>%s</b><a href="%s">%s</a></p>' % (title, url, message.record_name)
@api.multi
@api.depends('author_id')
def _get_author(self):
for message in self:
author = message.author_id and message.author_id.name_get()[0][1]
url = None
if author:
url = '#id=%s&model=res.partner&view_type=form' % message.author_id.id
image_src = '/web/binary/image?model=mail.message&field=author_avatar&id=%s' % (
message.id)
if author:
message.author = '<a title=%s href="%s"><img height="36px" src="%s"></a>' % (author, url, image_src)
else:
message.author = message.email_from
@api.multi
@api.depends('author_id')
def _get_about(self):
for message in self:
message.about = message.subject or message.record_name or 'UNDEFINED'
short_description = fields.Char(string = "Description", compute=_get_description_short, help='Message description: either the subject, or the beginning of the body', store=False)
author = fields.Char(string="Author", compute=_get_author, store=False)
about = fields.Char(string="About", compute=_get_about, store=False)
# url = fields.Char(string="url", compute=_get_model_url, store=False)
mybody = fields.Html(string="Contents", help='Automatically sanitized HTML contents',
compute=_get_body, store=False)
    # TODO: round the epoch date down to the day, for use with group_by
# @api.multi
# @api.depends("to_read")
# def _on_open_set_messages_read(self):
# context = dict(self._context or {})
# for message in self:
# if message.parent_id.id:
# print (message.id, message.parent_id)
# # self[0].set_message_read(True) # only first not the parent (assume already be reed if parent)
# # self.refresh()
#
# on_open = fields.Integer(compute="_on_open_set_messages_read", store=False)
@api.multi
def toggle_messages_to_read(self):
for message in self:
to_read = message.to_read
message.set_message_read(to_read)
message.child_ids.set_message_read(to_read)
# message.child_ids.refresh()
# return {'type': 'ir.actions.client', 'tag': 'reload'}
@api.multi
def toggle_messages_starred(self):
for message in self:
message.set_message_starred(not message.starred)
# return { 'type': 'ir.actions.client', 'tag': 'reload' }
@api.multi
def unset_messages_to_read(self):
for message in self:
message.set_message_read(False)
# message.child_ids.set_message_read(False)
# return {'type': 'ir.actions.client', 'tag': 'reload'}
| [
"[email protected]"
] | |
eb2bd8abbc7f912dfa40133c45c02f66526b71a2 | 994c1b533fe64265715231d6458a9f316ce82cbf | /users/forms.py | 34d47092d282474eb8c2de50d4c673242cd68db6 | [] | no_license | zerobubus/hw05_final | 07320a8fcc1119f28cf3bd5e3285d381470f7f47 | ed89bdd5fa088c71bdb127a780fc2eac3bcda47c | refs/heads/master | 2022-11-24T23:09:29.335507 | 2020-07-29T16:49:10 | 2020-07-29T16:49:10 | 282,685,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
User = get_user_model()
# Create a custom class for the registration form,
# subclassing the built-in UserCreationForm.
class CreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ("first_name", "last_name", "username", "email")
| [
"[email protected]"
] | |
0eba2abfbc6b06e28fc7e2042a9f4aeaf406d89e | 91f0ada382a529bb928d3eb122fd65900d0702f5 | /03.Third_week_exersice/03Football_cards.py | 309caf08577fd76f7ab6edecfc7eb6f6c5f285a2 | [] | no_license | skafev/Python_fundamentals | aa62208ae0fdfc9ed6a9dcc0905c4a8e5aefd2be | 959f9bca948cc901ac3f10694f540d1d533585b5 | refs/heads/main | 2023-06-09T19:33:21.370459 | 2021-07-01T16:29:30 | 2021-07-01T16:29:30 | 382,092,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | text = input().split(" ")
team_A = 11
team_B = 11
less_then_seven = False
for text_one in set(text):
if "A" in text_one:
team_A -= 1
if "B" in text_one:
team_B -= 1
if team_A < 7:
less_then_seven = True
break
if team_B < 7:
less_then_seven = True
break
print(f"Team A - {team_A}; Team B - {team_B}")
if less_then_seven:
print("Game was terminated") | [
"[email protected]"
] | |
7481b35cd014efbfc7a8d72bee9a65e12c4e0c85 | 12938461fe1cc5617d3e425937a5d79ceb61a76a | /development/shenzyn/emailTrigger/jobseeker_email.py | 83532fcbcb79a3fd5153c8943da5c9e9e2dead4c | [] | no_license | Megha566/Shenzyn-Test-Aut | 15847b6c50a48957e3738ba40b567be3521360ce | 487751e94fb2bc4745cb1902f76530c58f4bd870 | refs/heads/master | 2023-02-11T19:28:53.923332 | 2021-01-08T07:45:17 | 2021-01-08T07:45:17 | 327,862,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # Python code to illustrate Sending mail with attachments
# from your Gmail account
# libraries to be imported
import smtplib
import glob
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
#Hotmail account, for gmail change the smtp server.
fromaddr = "[email protected]"
toaddr = "[email protected]"
# instance of MIMEMultipart
msg = MIMEMultipart()
# storing the senders email address
msg['From'] = fromaddr
# storing the receivers email address
msg['To'] = toaddr
# storing the subject
msg['Subject'] = "JobSeeker Automation Testing Results"
# string to store the body of the mail
body = "Please find the attached Test Reults for JobSeeker Automation"
# attach the body with the msg instance
msg.attach(MIMEText(body, 'plain'))
#Get the latest file from the Results Directory
list_of_files = glob.glob(r'D:\robotframework\Shenzyn-Test-Aut\development\shenzyn\Results\report-*.html') # * means all if need specific format then *.csv
filename = max(list_of_files, key=os.path.getctime)
# open the file to be sent
attachment = open(filename, "rb")
# instance of MIMEBase and named as p
p = MIMEBase('application', 'octet-stream')
# To change the payload into encoded form
p.set_payload((attachment).read())
# encode into base64
encoders.encode_base64(p)
p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
# attach the instance 'p' to instance 'msg'
msg.attach(p)
# creates SMTP session
s = smtplib.SMTP('smtp-mail.outlook.com', 587)
# start TLS for security
s.starttls()
# Authentication
s.login(fromaddr, "seerat@123")
# Converts the Multipart msg into a string
text = msg.as_string()
# sending the mail
s.sendmail(fromaddr, toaddr, text)
# terminating the session
s.quit()
| [
"[email protected]"
] | |
bf00ede1eb6695ab64ca14b24995b1e1df06d68d | 5c6d7ddebb3a3b7a5e8bd60b92f7ee28d23edb28 | /tile/side/__init__.py | 86bf1a2e8bc80f8276eba4cc8a341a0991e2238d | [] | no_license | jgrowl/pycarcassonne | 6bb4d19813fe31065064d7035d273aaac9eabb6c | fb989b4fcc8005cfc2d1b951f3968ec8fe5b43af | refs/heads/master | 2021-01-18T14:02:02.394650 | 2009-06-17T08:27:39 | 2009-06-17T08:27:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | #!/usr/bin/env python
from side import Side
from top import Top
from right import Right
from bottom import Bottom
from left import Left
| [
"[email protected]"
] | |
a6a2daf271ad89e416af84ed992ecfd9e12ae0b7 | cba3aea3b0f2614f5d7674e93a19caac43508178 | /db.py | 5ba46608243e0dc6b9e080a7d6661370af481ced | [] | no_license | joshua-deans/FitTrkr | a92682d0f0cf880e68186b396c5c89b3d57cf87d | cae51dd19a40d3e9f63e4663bd004396621aeb4b | refs/heads/master | 2020-03-26T07:25:34.263140 | 2019-11-20T08:00:25 | 2019-11-20T08:00:25 | 144,653,775 | 0 | 1 | null | 2019-11-20T08:00:26 | 2018-08-14T01:42:23 | HTML | UTF-8 | Python | false | false | 208 | py | def DBconfig():
dbconfig = {
"host": 'localhost',
"user": 'root',
"password": 'fitness2018!',
"DBName": 'fitTrkr',
"dictDB": 'DictCursor',
}
return dbconfig | [
"[email protected]"
] | |
bb9ea5783768afdeea7cd6a5508620b2d7af587d | dca653bb975528bd1b8ab2547f6ef4f48e15b7b7 | /tags/wxPy-2.9.2.2/wxPython/cfg_version.py | ebe70ff24da82d693db9366fc5c3806f62d8c9c3 | [] | no_license | czxxjtu/wxPython-1 | 51ca2f62ff6c01722e50742d1813f4be378c0517 | 6a7473c258ea4105f44e31d140ea5c0ae6bc46d8 | refs/heads/master | 2021-01-15T12:09:59.328778 | 2015-01-05T20:55:10 | 2015-01-05T20:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py |
# wxPython version numbers used in build
VER_MAJOR = 2 # The first three must match wxWidgets
VER_MINOR = 9
VER_RELEASE = 2
VER_SUBREL = 2 # wxPython release num for x.y.z release of wxWidgets
VER_FLAGS = "" # release flags, such as prerelease or RC num, etc.
| [
"RD@c3d73ce0-8a6f-49c7-b76d-6d57e0e08775"
] | RD@c3d73ce0-8a6f-49c7-b76d-6d57e0e08775 |
6e2244a1026602e0f26a5f99c52efe0e60411d11 | bd8fde6dbf227013c9af44ea29646c397b91ca75 | /demo/book/migrations/0001_initial.py | 8556db280c06fd6d6d0efc58352831b990938a8b | [] | no_license | hersinniji/git_demo_drf | 4aef66008fb808d0bc48c45a5025407d14399df9 | 6a06e0943e88b658d7b3278e2058a7b9de695157 | refs/heads/master | 2020-06-16T08:33:30.915129 | 2019-07-09T15:24:40 | 2019-07-09T15:24:40 | 195,524,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-16 05:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('btitle', models.CharField(max_length=20, verbose_name='名称')),
('bpub_date', models.DateField(verbose_name='发布日期')),
('bread', models.IntegerField(default=0, verbose_name='阅读量')),
('bcomment', models.IntegerField(default=0, verbose_name='评论量')),
('is_delete', models.BooleanField(default=False, verbose_name='逻辑删除')),
],
options={
'verbose_name': '图书',
'verbose_name_plural': '图书',
'db_table': 'tb_books',
},
),
migrations.CreateModel(
name='HeroInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hname', models.CharField(max_length=20, verbose_name='名称')),
('hgender', models.SmallIntegerField(choices=[(0, 'male'), (1, 'female')], default=0, verbose_name='性别')),
('hcomment', models.CharField(max_length=200, null=True, verbose_name='描述信息')),
('is_delete', models.BooleanField(default=False, verbose_name='逻辑删除')),
('hbook', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.BookInfo', verbose_name='图书')),
],
options={
'verbose_name': '英雄',
'verbose_name_plural': '英雄',
'db_table': 'tb_heros',
},
),
]
| [
"[email protected]"
] | |
307573fb0a6e3d11504211c85b45339e89642a92 | 066ee4df594a5dc90335d271b9d5a1b1e2a4d34c | /y/google-cloud-sdk/platform/google_appengine/google/appengine/api/app_identity/app_identity.py | 3e92c752ad31a363b9b061b8fc72201afdbe81a1 | [
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-3-Clause",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ychen820/microblog | a2d82447525325ec58285c2e5db58b79cceaca1b | d379afa2db3582d5c3be652165f0e9e2e0c154c6 | refs/heads/master | 2021-01-20T05:58:48.424357 | 2015-04-28T22:03:09 | 2015-04-28T22:03:09 | 32,948,331 | 0 | 2 | BSD-3-Clause | 2020-07-25T05:04:35 | 2015-03-26T19:45:07 | Python | UTF-8 | Python | false | false | 17,342 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides access functions for the app identity service."""
import os
import time
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api.app_identity import app_identity_service_pb
from google.appengine.runtime import apiproxy_errors
__all__ = ['BackendDeadlineExceeded',
'BlobSizeTooLarge',
'InternalError',
'InvalidScope',
'NotAllowed',
'OperationNotImplemented',
'Error',
'create_rpc',
'make_sign_blob_call',
'make_get_public_certificates_call',
'make_get_service_account_name_call',
'sign_blob',
'get_public_certificates',
'PublicCertificate',
'get_service_account_name',
'get_application_id',
'get_default_version_hostname',
'get_access_token',
'get_access_token_uncached',
'make_get_access_token_call',
'get_default_gcs_bucket_name',
'make_get_default_gcs_bucket_name_call',
]
_APP_IDENTITY_SERVICE_NAME = 'app_identity_service'
_SIGN_FOR_APP_METHOD_NAME = 'SignForApp'
_GET_CERTS_METHOD_NAME = 'GetPublicCertificatesForApp'
_GET_SERVICE_ACCOUNT_NAME_METHOD_NAME = 'GetServiceAccountName'
_GET_DEFAULT_GCS_BUCKET_NAME_METHOD_NAME = 'GetDefaultGcsBucketName'
_GET_ACCESS_TOKEN_METHOD_NAME = 'GetAccessToken'
_PARTITION_SEPARATOR = '~'
_DOMAIN_SEPARATOR = ':'
_MEMCACHE_KEY_PREFIX = '_ah_app_identity_'
_MEMCACHE_NAMESPACE = '_ah_'
_TOKEN_EXPIRY_SAFETY_MARGIN = 300
_MAX_TOKEN_CACHE_SIZE = 100
_MAX_RANDOM_EXPIRY_DELTA = 60
_access_token_cache = {}
_random_cache_expiry_delta = (
hash(time.time()) % (_MAX_RANDOM_EXPIRY_DELTA * 1000) / 1000.0)
class Error(Exception):
"""Base error type."""
class BackendDeadlineExceeded(Error):
"""Communication to backend service timed-out."""
class BlobSizeTooLarge(Error):
"""Size of blob to sign is larger than the allowed limit."""
class InternalError(Error):
"""Unspecified internal failure."""
class InvalidScope(Error):
"""Invalid scope."""
class NotAllowed(Error):
"""The operation is not allowed."""
class OperationNotImplemented(Error):
"""The operation is not implemented for the service account."""
def _to_app_identity_error(error):
"""Translate an application error to an external Error, if possible.
Args:
error: An ApplicationError to translate.
Returns:
error: app identity API specific error message.
"""
error_map = {
app_identity_service_pb.AppIdentityServiceError.NOT_A_VALID_APP:
InternalError,
app_identity_service_pb.AppIdentityServiceError.DEADLINE_EXCEEDED:
BackendDeadlineExceeded,
app_identity_service_pb.AppIdentityServiceError.BLOB_TOO_LARGE:
BlobSizeTooLarge,
app_identity_service_pb.AppIdentityServiceError.UNKNOWN_ERROR:
InternalError,
app_identity_service_pb.AppIdentityServiceError.UNKNOWN_SCOPE:
InvalidScope,
app_identity_service_pb.AppIdentityServiceError.NOT_ALLOWED:
NotAllowed,
app_identity_service_pb.AppIdentityServiceError.NOT_IMPLEMENTED:
OperationNotImplemented,
}
if error.application_error in error_map:
return error_map[error.application_error](error.error_detail)
else:
return InternalError('%s: %s' %
(error.application_error, error.error_detail))
class PublicCertificate(object):
"""Info about public certificate.
Attributes:
key_name: name of the certificate.
x509_certificate_pem: x509 cerficiates in pem format.
"""
def __init__(self, key_name, x509_certificate_pem):
self.key_name = key_name
self.x509_certificate_pem = x509_certificate_pem
def create_rpc(deadline=None, callback=None):
"""Creates an RPC object for use with the App identity API.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
callback: Optional callable to invoke on completion.
Returns:
An apiproxy_stub_map.UserRPC object specialized for this service.
"""
return apiproxy_stub_map.UserRPC(_APP_IDENTITY_SERVICE_NAME,
deadline, callback)
def make_sign_blob_call(rpc, bytes_to_sign):
"""Executes the RPC call to sign a blob.
Args:
rpc: a UserRPC instance.
bytes_to_sign: blob that needs to be signed.
Returns:
A tuple that contains the signing key name and the signature.
Raises:
TypeError: when bytes_to_sign is not a str.
"""
if not isinstance(bytes_to_sign, str):
raise TypeError('bytes_to_sign must be str: %s'
% bytes_to_sign)
request = app_identity_service_pb.SignForAppRequest()
request.set_bytes_to_sign(bytes_to_sign)
response = app_identity_service_pb.SignForAppResponse()
def signing_for_app_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A tuple that contains signing key name and signature.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _SIGN_FOR_APP_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
return (response.key_name(), response.signature_bytes())
rpc.make_call(_SIGN_FOR_APP_METHOD_NAME, request,
response, signing_for_app_result)
def make_get_public_certificates_call(rpc):
"""Executes the RPC call to get a list of public certificates.
Args:
rpc: a UserRPC instance.
Returns:
A list of PublicCertificate object.
"""
request = app_identity_service_pb.GetPublicCertificateForAppRequest()
response = app_identity_service_pb.GetPublicCertificateForAppResponse()
def get_certs_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A list of PublicCertificate object.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_CERTS_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
result = []
for cert in response.public_certificate_list_list():
result.append(PublicCertificate(
cert.key_name(), cert.x509_certificate_pem()))
return result
rpc.make_call(_GET_CERTS_METHOD_NAME, request, response, get_certs_result)
def make_get_service_account_name_call(rpc):
"""Get service account name of the app.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Service account name of the app.
"""
request = app_identity_service_pb.GetServiceAccountNameRequest()
response = app_identity_service_pb.GetServiceAccountNameResponse()
if rpc.deadline is not None:
request.set_deadline(rpc.deadline)
def get_service_account_name_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A string which is service account name of the app.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_SERVICE_ACCOUNT_NAME_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
return response.service_account_name()
rpc.make_call(_GET_SERVICE_ACCOUNT_NAME_METHOD_NAME, request,
response, get_service_account_name_result)
def make_get_default_gcs_bucket_name_call(rpc):
"""Get default google storage bucket name for the app.
Args:
rpc: A UserRPC object.
Returns:
Default Google Storage Bucket name of the app.
"""
request = app_identity_service_pb.GetDefaultGcsBucketNameRequest()
response = app_identity_service_pb.GetDefaultGcsBucketNameResponse()
if rpc.deadline is not None:
request.set_deadline(rpc.deadline)
def get_default_gcs_bucket_name_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
A string which is the name of the app's default google storage bucket.
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_DEFAULT_GCS_BUCKET_NAME_METHOD_NAME, (
repr(rpc.method))
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
if response.has_default_gcs_bucket_name():
return response.default_gcs_bucket_name()
else:
return None
rpc.make_call(_GET_DEFAULT_GCS_BUCKET_NAME_METHOD_NAME, request,
response, get_default_gcs_bucket_name_result)
def sign_blob(bytes_to_sign, deadline=None):
"""Signs a blob.
Args:
bytes_to_sign: blob that needs to be signed.
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Tuple, signing key name and signature.
"""
rpc = create_rpc(deadline)
make_sign_blob_call(rpc, bytes_to_sign)
rpc.wait()
return rpc.get_result()
def get_public_certificates(deadline=None):
"""Get public certificates.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
A list of PublicCertificate object.
"""
rpc = create_rpc(deadline)
make_get_public_certificates_call(rpc)
rpc.wait()
return rpc.get_result()
def get_service_account_name(deadline=None):
"""Get service account name of the app.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Service account name of the app.
"""
rpc = create_rpc(deadline)
make_get_service_account_name_call(rpc)
rpc.wait()
return rpc.get_result()
def get_default_gcs_bucket_name(deadline=None):
"""Gets the default gs bucket name for the app.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Default bucket name for the app.
"""
rpc = create_rpc(deadline)
make_get_default_gcs_bucket_name_call(rpc)
rpc.wait()
return rpc.get_result()
def _ParseFullAppId(app_id):
"""Parse a full app id into partition, domain name and display app_id.
Args:
app_id: The full partitioned app id.
Returns:
A tuple (partition, domain_name, display_app_id). The partition
and domain name may be empty.
"""
partition = ''
psep = app_id.find(_PARTITION_SEPARATOR)
if psep > 0:
partition = app_id[:psep]
app_id = app_id[psep+1:]
domain_name = ''
dsep = app_id.find(_DOMAIN_SEPARATOR)
if dsep > 0:
domain_name = app_id[:dsep]
app_id = app_id[dsep+1:]
return partition, domain_name, app_id
def get_application_id():
"""Get the application id of an app.
Returns:
The application id of the app.
"""
full_app_id = os.getenv('APPLICATION_ID')
_, domain_name, display_app_id = _ParseFullAppId(full_app_id)
if domain_name:
return '%s%s%s' % (domain_name, _DOMAIN_SEPARATOR, display_app_id)
return display_app_id
def get_default_version_hostname():
"""Get the standard hostname of the default version of the app.
For example if your application_id is my-app then the result might be
my-app.appspot.com.
Returns:
The standard hostname of the default version of the application.
"""
return os.getenv('DEFAULT_VERSION_HOSTNAME')
def make_get_access_token_call(rpc, scopes, service_account_id=None):
"""OAuth2 access token to act on behalf of the application (async, uncached).
Most developers should use get_access_token instead.
Args:
rpc: RPC object.
scopes: The requested API scope string, or a list of strings.
Raises:
InvalidScope: if the scopes are unspecified or invalid.
"""
request = app_identity_service_pb.GetAccessTokenRequest()
if not scopes:
raise InvalidScope('No scopes specified.')
if isinstance(scopes, basestring):
request.add_scope(scopes)
else:
for scope in scopes:
request.add_scope(scope)
if service_account_id:
if isinstance(service_account_id, (int, long)):
request.set_service_account_id(service_account_id)
elif isinstance(service_account_id, basestring):
request.set_service_account_name(service_account_id)
else:
raise TypeError()
response = app_identity_service_pb.GetAccessTokenResponse()
def get_access_token_result(rpc):
"""Check success, handle exceptions, and return converted RPC result.
This method waits for the RPC if it has not yet finished, and calls the
post-call hooks on the first invocation.
Args:
rpc: A UserRPC object.
Returns:
Pair, Access token (string) and expiration time (seconds since the epoch).
"""
assert rpc.service == _APP_IDENTITY_SERVICE_NAME, repr(rpc.service)
assert rpc.method == _GET_ACCESS_TOKEN_METHOD_NAME, repr(rpc.method)
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
raise _to_app_identity_error(err)
return response.access_token(), response.expiration_time()
rpc.make_call(_GET_ACCESS_TOKEN_METHOD_NAME, request,
response, get_access_token_result)
def get_access_token_uncached(scopes, deadline=None, service_account_id=None):
"""OAuth2 access token to act on behalf of the application (sync, uncached).
Most developers should use get_access_token instead.
Args:
scopes: The requested API scope string, or a list of strings.
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
Returns:
Pair, Access token (string) and expiration time (seconds since the epoch).
"""
rpc = create_rpc(deadline)
make_get_access_token_call(rpc, scopes, service_account_id=service_account_id)
rpc.wait()
return rpc.get_result()
def get_access_token(scopes, service_account_id=None):
"""OAuth2 access token to act on behalf of the application, cached.
Generates and caches an OAuth2 access token for the service account for the
appengine application.
Each application has an associated Google account. This function returns
OAuth2 access token corresponding to the running app. Access tokens are safe
to cache and reuse until their expiry time as returned. This method will
do that using both an in-process cache and memcache.
Args:
scopes: The requested API scope string, or a list of strings.
Returns:
Pair, Access token (string) and expiration time (seconds since the epoch).
"""
cache_key = _MEMCACHE_KEY_PREFIX + str(scopes)
if service_account_id:
cache_key += ',%s' % service_account_id
cached = _access_token_cache.get(cache_key)
if cached is not None:
access_token, expires_at = cached
safe_expiry = (expires_at - _TOKEN_EXPIRY_SAFETY_MARGIN -
_random_cache_expiry_delta)
if time.time() < safe_expiry:
return access_token, expires_at
memcache_value = memcache.get(cache_key, namespace=_MEMCACHE_NAMESPACE)
if memcache_value:
access_token, expires_at = memcache_value
else:
access_token, expires_at = get_access_token_uncached(
scopes, service_account_id=service_account_id)
memcache_expiry = expires_at - _TOKEN_EXPIRY_SAFETY_MARGIN
memcache_expiry -= _MAX_RANDOM_EXPIRY_DELTA
memcache_expiry -= 10
memcache.add(cache_key, (access_token, expires_at),
memcache_expiry,
namespace=_MEMCACHE_NAMESPACE)
if len(_access_token_cache) >= _MAX_TOKEN_CACHE_SIZE:
_access_token_cache.clear()
_access_token_cache[cache_key] = (access_token, expires_at)
return access_token, expires_at
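# Illustrative usage (not part of the original module): callers typically request
# a token for one or more OAuth2 scopes and reuse it until it expires, e.g.
#
#     token, expiry = get_access_token(
#         ['https://www.googleapis.com/auth/devstorage.read_only'])
#
# and send it as a "Bearer" credential on requests to Google APIs.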
| [
"[email protected]"
] | |
c38f76869d89fa31fa4b8ef71ec6a19879b98f2c | 41e02a6cb11149501c8cb05cf549e6497da146b3 | /social_networking_project/settings.py | 710df13989abd3782da33747c3675a1f9f71d4d4 | [] | no_license | muffajal53/holiday_list_by_IP_address_django | f47e8de99ed6455754240d8d8b6f683672e8730d | d05a39a1314c72acd71a7af22ee83ae58ea2d823 | refs/heads/master | 2023-04-26T22:22:20.825232 | 2021-05-31T19:44:28 | 2021-05-31T19:44:28 | 372,609,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,638 | py | import os
from pathlib import Path
from datetime import timedelta
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-g$7xtg$kb@x+1(1wi(zd%umf&^s62nkh@^hbcms4=r)3ycj#bk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# comment this for production
BASE_URL = "http://localhost:8000"
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'user',
'post',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'social_networking_project.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'social_networking_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
# "DEFAULT_PERMISSION_CLASSES": ["rest_framework.permissions.IsAuthenticated"],
# "DEFAULT_FILTER_BACKENDS": [
# "django_filters.rest_framework.DjangoFilterBackend",
# "rest_framework.filters.OrderingFilter",
# ]
}
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(days=1),
"REFRESH_TOKEN_LIFETIME": timedelta(days=365),
"ROTATE_REFRESH_TOKENS": True,
"USER_ID_FIELD": "id",
"USER_ID_CLAIM": "user_id",
"BLACKLIST_AFTER_ROTATION": True,
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
}
# CELERY STUFF
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Kolkata'
#Abstract API keys
IP_GEOLOCATION_API_KEY = 'c3059a655dec4f869fd642b17fd9bbd8'
HOLIDAY_API_KEY = '8f4767975c7c4e0180e61fe71b9c3a68' | [
"[email protected]"
] | |
4e0dfcb09266aa4877dbacacaf17ce807eaf09d9 | 289818a63e8213606fd437c0ab29f0dba6750357 | /search/urls.py | 147bf92651599f02ee1e91018879a0b443ddbdda | [] | no_license | dzynin/django-mtr | 642b9e4b7f568d89059cedcf3dd8a4061a94ba83 | 15c9ba13b2f91cacd76b9c04da1c6ee4cdfe9aeb | refs/heads/master | 2021-05-29T07:29:42.303984 | 2015-09-06T13:54:41 | 2015-09-06T13:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
#url(r'^$', include('haystack.urls')),
#url(r'^advanced/$', 'search.views.index'),
#url(r'^autocomplete/$', 'search.autocomplete.autocomplete'),
url(r'^$', 'search.views.results', name='search'),
)
| [
"[email protected]"
] | |
526eb57ef6986d95baa70b2a0a489faab9ae04ff | 3b0feb458896e588297db26aab635b45b3765cbc | /hbaseread2.py | 7106b530f5b27b128083e180b0e743803be1d8bb | [] | no_license | guoch/sparkproject | af384551be26261471cc8ad766010c3f8cda8252 | 2afd598ec7d6849268230f28e9ffc970a5a61d88 | refs/heads/master | 2021-01-10T11:03:26.060566 | 2015-12-26T13:16:07 | 2015-12-26T13:16:07 | 47,060,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,971 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import json
# from jieba import analyse
import jieba
from pyspark import SparkContext
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
"""
author:Kira(Chenghao Guo)
filter the top k results by time
Create test data in HBase first:
hbase(main):016:0> create 'test', 'f1'
0 row(s) in 1.0430 seconds
hbase(main):017:0> put 'test', 'row1', 'f1:a', 'value1'
0 row(s) in 0.0130 seconds
hbase(main):018:0> put 'test', 'row1', 'f1:b', 'value2'
0 row(s) in 0.0030 seconds
hbase(main):019:0> put 'test', 'row2', 'f1', 'value3'
0 row(s) in 0.0050 seconds
hbase(main):020:0> put 'test', 'row3', 'f1', 'value4'
0 row(s) in 0.0110 seconds
hbase(main):021:0> scan 'test'
ROW COLUMN+CELL
row1 column=f1:a, timestamp=1401883411986, value=value1
row1 column=f1:b, timestamp=1401883415212, value=value2
row2 column=f1:, timestamp=1401883417858, value=value3
row3 column=f1:, timestamp=1401883420805, value=value4
4 row(s) in 0.0240 seconds
"""
def wordcut(v):
try:
x=eval("'%s'"%v['value'])
except Exception,ex:
x='invalid'
seglist=jieba.cut(x)
# seglist=analyse.extract_tags(x,10)
myvalue='|'.join(seglist)
return myvalue
# def content_analyse(v):
# try:
# x=eval("'%s'"%v['value'])
# except Exception,ex:
# x='invalid'
# # seglist=jieba.cut(x)
# seglist=analyse.extract_tags(x,10)
# myvalue='|'.join(seglist)
# return myvalue
def inverted(v):
url=v[0]
return ((word,url) for word in v[1].split('|'))
def ridoff(ids):
news_ids=list(set(ids))
# news_ids.sort(ids.index)
return news_ids
def hbaseput(sc,host,table,args): #inserting rows one by one performs poorly, so insert in parallel
'''
./bin/spark-submit --driver-class-path /path/to/example/jar \
/path/to/examples/hbase_outputformat.py <args>
Assumes you have created <table> with column family <family> in HBase
running on <host> already
'''
conf = {"hbase.zookeeper.quorum": host,
"hbase.mapred.outputtable": table,
"mapreduce.outputformat.class": "org.apache.hadoop.hbase.mapreduce.TableOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Writable"}
keyConv = "org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter"
valueConv = "org.apache.spark.examples.pythonconverters.StringListToPutConverter"
sc.parallelize([args]).map(lambda x: (x[0], x)).saveAsNewAPIHadoopDataset(
conf=conf,
keyConverter=keyConv,
valueConverter=valueConv)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("""
Usage: hbase_inputformat <host> <table>
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \
/path/to/examples/hbase_inputformat.py <host> <table> [<znode>]
Assumes you have some data in HBase already, running on <host>, in <table>
optionally, you can specify parent znode for your hbase cluster - <znode>
""", file=sys.stderr)
exit(-1)
host = sys.argv[1]
table = sys.argv[2]
# outputdir=sys.argv[3]
sc = SparkContext(appName="HBaseInputFormat")
# sc.addJar('/home/scidb/spark-1.5.2/lib/spark-examples-1.5.2-hadoop2.6.0.jar')
# Other options for configuring scan behavior are available. More information available at
# https://github.com/apache/hbase/blob/master/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
conf = {"hbase.zookeeper.quorum": host, "hbase.mapreduce.inputtable": table}
if len(sys.argv) > 3:
conf = {"hbase.zookeeper.quorum": host, "zookeeper.znode.parent": sys.argv[3],
"hbase.mapreduce.inputtable": table}
keyConv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
valueConv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"
hbase_rdd = sc.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat","org.apache.hadoop.hbase.io.ImmutableBytesWritable","org.apache.hadoop.hbase.client.Result",keyConverter=keyConv,valueConverter=valueConv,conf=conf)
hbase_rdd = hbase_rdd.flatMapValues(lambda v: v.split("\n")).mapValues(json.loads)
hbase_rdd_title=hbase_rdd.filter(lambda keyValue: keyValue[1]['qualifier']=='title' and keyValue[1]['value']!=None)
    hbase_rdd_title=hbase_rdd_title.mapValues(wordcut) #extract all keywords in the title; the title carries more weight
# hbase_rdd_content=hbase_rdd.filter(lambda keyValue: keyValue[1]['qualifier']=='content' and keyValue[1]['value']!=None)
    # hbase_rdd_content=hbase_rdd_content.mapValues(content_analyse) #use tf-idf to drop irrelevant keywords and keep the top k words
# tags=jieba.analyse.extract_tags(content,top_num)
'''
|著名|导演|郭宝昌|最新|执导|的|中国|首部|历史|谋略|情节剧|《|谋圣|鬼谷子|》|正在|浙江省|象山|影视城|热拍|。|郭宝昌|出|“|宅门|”|后|首次|玩|“|谋略|”|,|让|这部|剧|深受|观众|期待|,|他|表示|《|谋圣|鬼谷子|》|要|打|造成|中国|版|《|权力|的|游戏|》|。|
'''
# hbase_rdd_new=hbase_rdd_title.union(hbase_rdd_content)
hbase_rdd_new=hbase_rdd_title
#hbase_rdd_title=hbase_rdd_title.flatMap(inverted)
hbase_rdd_new=hbase_rdd_new.flatMap(inverted).groupByKey()
    #list(set(myList)) de-duplicates the list; one row can contain several urls to rank
    hbase_rdd_new=hbase_rdd_new.filter(lambda keyValue:len(keyValue[0])>4) #filter out keywords that are too short
    #ranking strategy: rank content after tf-idf
# hbase_rdd_new=hbase_rdd_new.mapValues(lambda v: list(set(v))).mapValues(lambda v: "|".join(v))
hbase_rdd_new=hbase_rdd_new.mapValues(ridoff).mapValues(lambda v: "|".join(v))
# sc.union(rdd1, rdd2)
# output = hbase_rdd_new.collect()
# for (k, v) in output:
# for url in v:
# if len(k)>1:
# hbaseput(sc,'ubuntu1','test3',[k,'f','index',url])
    # print(k+':'+",".join(v)) #remember to remove duplicate urls
# if v['qualifier']=='content':
# print(eval("'%s'"%v['value']))
# wordRDD=tc.flatMap(lambda x:jieba.cut(x))
# wordFreRDD=wordRDD.map(lambda x:(x,1))
# counts=wordFreRDD.reduceByKey(add)
# tags=jieba.analyse.extract_tags(content,top_num)
#hbase_outputformat <host> test row1 f q1 value1
host='ubuntu1'
table='newsindex'
confout = {"hbase.zookeeper.quorum": host,
"hbase.mapred.outputtable": table,
"mapreduce.outputformat.class": "org.apache.hadoop.hbase.mapreduce.TableOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Writable"}
keyConvout = "org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter"
valueConvout = "org.apache.spark.examples.pythonconverters.StringListToPutConverter"
    #the row key is designed to be unique, but its content is not
hbase_rdd_new.map(lambda x: [x[0],'f','index',x[1]]).map(lambda x: (x[0], x)).saveAsNewAPIHadoopDataset(
conf=confout,
keyConverter=keyConvout,
valueConverter=valueConvout)
sc.stop()
    #process the contents of value
'''
result = pairs.filter(lambda keyValue: len(keyValue[1]) < 20)
nums = sc.parallelize([1, 2, 3, 4])
squared = nums.map(lambda x: x * x).collect()
for num in squared:
print "%i " % (num)
pairs = lines.map(lambda x: (x.split(" ")[0], x))
'''
#print((k, v))
'''
(u'http://www.chinanews.com/yl/2015/12-13/7668707.shtml', {u'qualifier': u'title', u'timestamp': u'1449980290800',
u'value': u'\\xE9\\xA6\\x99\\xE6\\xB8\\xAF\\xE6\\xBC\\x94\\xE5\\x91\\x98\\xE6\\x9E\\x97\\xE5\\xAD\\x90\\xE8\\x81\\xAA\\xE5\\xBD\\x93\\xE7\\x88\\xB8\\xE7\\x88\\xB8 \\xE8\\xA2\\xAB\\xE5\\x84\\xBF\\xE5\\xAD\\x90\\xE8\\x84\\x9A\\xE8\\xB8\\xA2\\xE6\\x84\\x9F\\xE5\\x8A\\xA8\\xE5\\x88\\xB0\\xE5\\x93\\xAD(\\xE5\\x9B\\xBE)', u'columnFamily': u'f', u'type': u'Put', u'row': u'http://www.chinanews.com/yl/2015/12-13/7668707.shtml'})
u'row1', {u'qualifier': u'a', u'timestamp': u'1450598363113', u'value': u'value1', u'columnFamily': u'f1', u'type': u'Put', u'row': u'row1'}
(u'row1', {u'qualifier': u'b', u'timestamp': u'1450598369239', u'value': u'value2', u'columnFamily': u'f1', u'type': u'Put', u'row': u'row1'})
(u'row2', {u'qualifier': u'', u'timestamp': u'1450598376945', u'value': u'value3', u'columnFamily': u'f1', u'type': u'Put', u'row': u'row2'})
(u'row3', {u'qualifier': u'', u'timestamp': u'1450598382736', u'value': u'value4', u'columnFamily': u'f1', u'type': u'Put', u'row': u'row3'})
''' | [
"[email protected]"
] | |
60e7629da9f99587fad3ae8ca2de4fb0451aa8b9 | a2c66f592770b04d27319b470ff12ec0342bf326 | /Reinforcement-Learning/K_armed_bandit.py | 7322f6d932093342c34af29782e7a55f9222e7d3 | [] | no_license | Zhangyxyyx/Machine-Learning | 5f4ffcd4604dcf72105512f37606f866c1b0fd00 | 434b71d884905e88c4535249109cca757924b6a7 | refs/heads/master | 2022-02-23T13:40:26.890800 | 2019-10-16T08:22:14 | 2019-10-16T08:22:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | """
an implementation of the K-armed bandit; the algorithm is on page 375 of the book <Machine Learning>
"""
import random
R = [1, 2, 3, 4, 5]
e = 0.5
r = 0
arms_num = 5
Q = [0 for i in range(arms_num)]
count = [0 for i in range(arms_num)]
T = 100000
for i in range(T):
p = random.random()
if p <0.001:
k = random.randint(0, 4)
else:
temp = max(Q)
max_index = []
for j in range(len(Q)):
if Q[j] == temp:
max_index.append(j)
index = random.randint(0, len(max_index) - 1)
k = max_index[index]
v = R[k]
r += v
Q[k] = (Q[k] * count[k] + v) / (count[k] + 1)
count[k] += 1
print(r)
print(count)
| [
"[email protected]"
] | |
72d9567ddd4b1974eda07d6f6f72ea3423236bd1 | f16eaef4075a55f46d53729ab0612b5111b176d4 | /00_data_structure/binary_tree.py | c425f9de960ade636c14ee2118274fe230d46f46 | [] | no_license | K-AlfredIwasaki/coding_interview | cfc590feb2414e0a7403a2f5a3d2497ea78a4f02 | 470ccbc8ee18f3d23bfa5bbdc93b72867d0b2647 | refs/heads/master | 2021-08-23T23:06:48.112588 | 2017-12-07T01:03:01 | 2017-12-07T01:03:01 | 103,080,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | class BinaryTree:
def __init__(self, content, left=None, right=None):
self.content = content
self.left = left
self.right = right
self.depth = -1
def __str__(self):
return "(" + str(self.content) + " ( " + str(self.left) + " | " \
+ str(self.right) + "))" | [
"[email protected]"
] | |
37d58cea0c7347774b5080d6bb595b4f918caefd | 5edded0c82e56dd57b120fec7ff0f3ba9ed18961 | /shape_calculator.py | d1308e70467b64062286ab87dc9f6a1aa8769684 | [] | no_license | jorgechavarriaga/PolygonAreaCalculator | 933d87798c6c5f5ae8c4fb6de3fd4d0de978b492 | 32c7604e938d68569075afd0bcc91b344b930d44 | refs/heads/main | 2023-03-28T19:43:53.056877 | 2021-03-29T13:00:28 | 2021-03-29T13:00:28 | 351,799,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | import math
# Rectangle class
class Rectangle:
# When a Rectangle object is created, it should be initialized with `width` and `height` attributes.
def __init__(self, width, height):
self.width = width
self.height = height
# If an instance of a Rectangle is represented as a string, it should look like: `Rectangle(width=5, height=10)`
def __str__(self):
rectangle_string = type(self).__name__ + "(width=" + str(self.width) + ", height=" + str(self.height) +")"
return rectangle_string
# method set_width
def set_width(self, width):
self.width = width
# method set_height
def set_height(self, height):
self.height = height
# method get_area
def get_area(self):
area = self.width * self.height
return area
# method get_perimeter
def get_perimeter(self):
perimeter = (2* (self.width + self.height))
return perimeter
# method get_diagonal
def get_diagonal(self):
diagonal = math.sqrt(pow(self.width, 2) + pow(self.height, 2))
return diagonal
# method get_picture
def get_picture(self):
if self.width > 50 or self.height > 50:
return 'Too big for picture.'
pattern = ''
for index in range(self.height):
for index in range(self.width):
pattern += '*'
pattern += '\n'
return pattern
# method get_amount_inside
def get_amount_inside(self, shape):
area1 = shape.get_area()
count = 0
area_home = self.get_area()
while area_home >= area1:
area_home = area_home - area1
count += 1
return count
# Square class: The Square class should be a subclass of Rectangle. When a Square object is
# created, a single side length is passed in. The `__init__` method should store the side
# length in both the `width` and `height` attributes from the Rectangle class.
class Square(Rectangle):
def __init__(self, side):
super().__init__(side,side)
# An instance of a Square is represented as a string
def __str__(self):
square_string = type(self).__name__ + "(side="+ str(self.width) + ")"
return square_string
# method set_side
def set_side(self, side):
self.set_width(side)
self.set_height(side) | [
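# A minimal usage sketch of the two classes above (the values 5, 10 and 3 are
# arbitrary illustrations, not part of the original project requirements):
if __name__ == "__main__":
    rect = Rectangle(5, 10)
    print(rect)                        # Rectangle(width=5, height=10)
    print(rect.get_area())             # 50
    print(rect.get_perimeter())        # 30
    sq = Square(3)
    print(sq)                          # Square(side=3)
    print(rect.get_amount_inside(sq))  # 5, counted by repeated subtraction of areas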
"[email protected]"
] | |
d2a77564db9f4569384851f86e6be093484a266c | 5b56c0695d0582186f8aa902e51551c2c297c292 | /python/generateNormalizedData.py | 5e5dc13c2095ee25e5cb3dc4fb056ce8a22717d5 | [] | no_license | IamcalledPhil/bechdel-test-data-visualisation | caab8d3ea8ee04c3f5028ca5e99f2d6c4ffcf62c | ff26f695e9c1af7eacad83c67e8b570aa6610b76 | refs/heads/master | 2022-09-19T00:41:56.358308 | 2020-01-11T23:12:45 | 2020-01-11T23:12:45 | 267,276,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | import json
import io
with open('moviesBechdel.json') as f:
moviesBechdel = json.load(f)
with open('moviesBudget.json') as f:
moviesBudget = json.load(f)
normalizedMovies = []
for movie in moviesBudget:
for bechMovie in moviesBechdel:
if movie["imdb_id"] == 'tt' + bechMovie["imdbid"]:
normalizedMovie = bechMovie
normalizedMovie["budget"] = movie["budget"]
normalizedMovies.append(normalizedMovie)
with io.open('normalizedMovies.json', 'w', encoding='utf8') as outfile:
data = json.dumps(normalizedMovies, outfile, ensure_ascii=False, indent=4)
outfile.write(unicode(data)) | [
"[email protected]"
] | |
a4208a1024afaa7f06714847c3404d14fb8e0bcc | d9a1d531af2a553c214f29bdff0f8f69352c862a | /Exp 6/script.py | 1bf87580f4864cecb6466c78a1c75b5ce89ff926 | [] | no_license | kante95/FP2 | 7831685878c916960262d13a444097077c34cd67 | 7f58ed3be0b74ce526ddceaccd38e624cfbee305 | refs/heads/master | 2021-05-15T15:12:17.925441 | 2018-01-25T10:58:34 | 2018-01-25T10:58:34 | 107,283,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,425 | py | import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 16
directory = "data_rubidium/"
def multi_lorentz_peak(w, Voff, Vlorentz, width, wresonance,
Vlorentz2, width2, wresonance2,
Vlorentz3, width3, wresonance3,
Vlorentz4, width4, wresonance4,
Vlorentz5, width5, wresonance5,
Vlorentz6, width6, wresonance6):
return (Voff+Vlorentz*width/((w-wresonance)**2+(width)**2)+
Vlorentz2*width2/((w-wresonance2)**2+(width2)**2)+
Vlorentz3*width3/((w-wresonance3)**2+(width3)**2)+
Vlorentz4*width4/((w-wresonance4)**2+(width4)**2)+
Vlorentz5*width5/((w-wresonance5)**2+(width5)**2)+
Vlorentz6*width6/((w-wresonance6)**2+(width6)**2))
def multi_lorentz_peak2(w, Voff, Vlorentz, width, wresonance,
Vlorentz2, width2, wresonance2,
Vlorentz3, width3, wresonance3,
Vlorentz4, width4, wresonance4,
Vlorentz5, width5, wresonance5,
Vlorentz6, width6, wresonance6,
Vlorentz7, width7, wresonance7):
return (Voff+Vlorentz*width/((w-wresonance)**2+(width)**2)+
Vlorentz2*width2/((w-wresonance2)**2+(width2)**2)+
Vlorentz3*width3/((w-wresonance3)**2+(width3)**2)+
Vlorentz4*width4/((w-wresonance4)**2+(width4)**2)+
Vlorentz5*width5/((w-wresonance5)**2+(width5)**2)+
Vlorentz6*width6/((w-wresonance6)**2+(width6)**2)+
Vlorentz7*width7/((w-wresonance7)**2+(width7)**2))
def lorentz(x,A,m,s):
return A*(s/((x-m)**2+s**2))
def lorentz_with_offset(x,A,m,s,offset):
return A*(s/((x-m)**2+s**2)) + offset
def gaussian(x, height, center, width):
return height*np.exp(-(x - center)**2/(2*width**2))
def read_oscilloscope_data(file):
data = np.genfromtxt( file, delimiter=",",
usecols=range(3,5), skip_header=0)
return data[:,0],data[:,1]
#fabryperot
t,v = read_oscilloscope_data(directory+"ALL0106/F0106CH1.CSV")
plt.errorbar(t,v,yerr = (1.6/256)*np.ones(len(t)))
plt.xlabel("Time [s]")
plt.ylabel("Voltage [V]")
def center_of_peak(t,v,low,high):
#first peak
v = v[(t> low) & (t< high)]
t = t[(t> low) & (t< high)]
plt.figure()
plt.errorbar(t,v,yerr = (1.6/256)*np.ones(len(t)),fmt='.',markersize=5,label="Experimental data")
#popt, pcov = curve_fit(gaussian, t, v,bounds = ([-np.inf,-0.0010,0],[np.inf,-0.0004,0.00025]))
#perr = np.sqrt(np.diag(pcov))
popt, pcov = curve_fit(lorentz, t, v,sigma = (1.6/256)*np.ones(len(t)),bounds = ([0,low,0],[np.inf,high,0.0004]))
perr = np.sqrt(np.diag(pcov))
print("Lorentzian peak fit: A,t0,gamma")
print(popt,perr)
print("Reduced chi2")
chi_squared = np.sum( ((lorentz(t, *popt)-v)/(1.6/256))**2 )
reduced_chi_squared = chi_squared / (len(t) - len(popt))
print(reduced_chi_squared)
t1 = np.arange(low,high,0.0000001)
plt.plot(t1, lorentz(t1, *popt), 'r-', label='Lorentzian fit')
popt, pcov = curve_fit(gaussian, t, v,sigma = (1.6/256)*np.ones(len(t)),bounds = ([0,low,0],[np.inf,high,0.0004]))
perr = np.sqrt(np.diag(pcov))
print("Gaussia peak fit: B,t0,sigma")
print(popt,perr)
print("Reduced chi2")
chi_squared = np.sum( ((gaussian(t, *popt)-v)/(1.6/256))**2 )
reduced_chi_squared = chi_squared / (len(t) - len(popt))
print(reduced_chi_squared)
t = np.arange(low,high,0.000001)
plt.plot(t, gaussian(t, *popt), 'g-', label='Gaussian fit')
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Voltage [V]")
return popt[1],perr[1]
center1,dcenter1 = center_of_peak(t,v,-0.000750,-0.000640)
center2,dcenter2 = center_of_peak(t,v,0.00294,0.00304)
c = 299792458
L = 20e-2
real_fsr = c/L
time_fsr = -center1+center2
dtime_fsr = np.sqrt(dcenter1**2 + dcenter2**2)
print("Free spectral range: %f +/- %f",time_fsr,dtime_fsr)
def t2freq(t):
return (real_fsr/time_fsr)*t*1e-6 #in MHz
#hyperfine structure
t,v = read_oscilloscope_data(directory+"ALL0102/F0102CH3.CSV")
t = t2freq(t)
tb,vb = read_oscilloscope_data(directory+"ALL0103/F0103CH3.CSV")
tb= t2freq(tb)
plt.figure()
plt.errorbar(t,v,yerr = (0.08/(256*np.sqrt(12)))*np.ones(len(t)), label="Hyperfine spectrum",fmt='.',markersize=5)
plt.errorbar(tb,vb,yerr = (0.08/(256*np.sqrt(12)))*np.ones(len(tb)),label="Background",fmt='.',markersize=5)
plt.xlabel("Detuning [MHz]")
plt.ylabel("Voltage [V]")
plt.legend()
diff = v-vb
# high = 450
# low = -150
# diff = diff[(t> low) & (t< high)]
# t = t[(t> low) & (t< high)]
plt.figure()
#plt.errorbar(t,diff,yerr = (0.08/256)*np.ones(len(t)),fmt="." )
plt.errorbar(t,diff,yerr = (2*0.08/(256*np.sqrt(12)))*np.ones(len(tb)),fmt='.',label = "Experimental data",markersize=5)
p= [-5.10681072e-03 ,3.76653515e-02, 1.58333476e+01, -7.77073287e+01,1.53816471e-01 , 1.45447758e+01, 6.85368665e+00 , 1.06667251e-01, 1.31304916e+01 , 9.13358709e+1 ,7.13693568e-1, 1.69622715e1, 2.37608123e+02 ,3.57212145e-1 , 1.22766961e1 , 1.52825804e+02,1.76598802e-3 , 1.62151338e1,378,1,1,1]
# def lorentz_peaks(t,v,low,high):
# #first peak
# v = v[(t> low) & (t< high)]
# t = t[(t> low) & (t< high)]
# popt, pcov = curve_fit(lorentz_with_offset, t, v,sigma = (0.04/256)*np.ones(len(t)),bounds = ([0,low,10,0.005],[np.inf,high,50,np.inf]))
# perr = np.sqrt(np.diag(pcov))
# print(popt,perr)
# t1 = np.arange(low,high,1)
# plt.plot(t1, lorentz_with_offset(t1, *popt), 'r-', label='fit: A=%f, xc=%f, s=%f offset=%f' % tuple(popt))
# plt.legend()
# return popt[1]
# lorentz_peaks(t,v,190,270)
popt, pcov = curve_fit(multi_lorentz_peak2, t, diff,sigma = (0.08/256)*np.ones(len(t)),p0=p,method="lm")
perr = np.sqrt(np.diag(pcov))
print("Multi lorentzian fit")
print(popt)
print(perr)
#popt= [-5.07632489e-03, -1.80209252e-02 , -9.04256941e+00 ,-7.82219782e+01,1.794366e-01, 1.63950133e+01 , 7.25825710e+00 , 1.43503961e-01,1.59195514e+01 , 9.13954496e+01 , 7.98698005e-01 , 1.84465199e+01,2.37651304e+02 , 4.03636692e-01 , 1.33479294e+01, 1.52940906e+02 ,1.09375260e-02 ,7 , 378]
plt.plot(t, multi_lorentz_peak2(t, *popt), 'r-',label="Multi lorentzian fit", zorder=20)
plt.xlim([-150,450])
plt.xlabel("Detuning [MHz]")
plt.ylabel("Voltage [V]")
plt.grid(True)
plt.legend()
peak1 = popt[3]
peak2 = popt[9]
peak3 = popt[18]
dpeak1 = perr[3]
dpeak2 = perr[9]
dpeak3 = perr[18]
print("Picco F=1: "+str(popt[3])+ "+/-" + str(dpeak1))
print("Picco F=2: "+str(popt[9])+ "+/-" + str(dpeak2))
print("Picco F=3: "+str(popt[18])+ "+/-" + str(dpeak3))
print("Altri picchi:" +str(popt[6])+" "+str(popt[12])+" "+str(popt[15])+" "+str(popt[21]))
peak12 = peak2-peak1
peak13 = peak3-peak1
peak23 = peak3-peak2
dpeak12 = np.sqrt(dpeak1**2 + dpeak2**2)
dpeak13 = np.sqrt(dpeak3**2 + dpeak1**2)
dpeak23 = np.sqrt(dpeak2**2 + dpeak3**2)
print("Distanza picco F 1->2:" +str(peak12) + "+/-" + str(dpeak12))
print("Distanza picco F 1->3:" +str(peak13) + "+/-" + str(dpeak13))
print("Distanza picco F 2->3:" +str(peak23) + "+/-" + str(dpeak23))
A = (peak23+peak12)/5
B = (2*peak23-3*peak12)/5
dA = (1/5)*np.sqrt(dpeak23**2 + dpeak12**2)
dB = (1/5)*np.sqrt((2*dpeak23)**2 + (3*dpeak12)**2)
print("Magnetic dipole constant: h"+str(A)+ "+/-" + str(dA))
print("Electric quadrupole constant: h"+str(B)+ "+/-" + str(dB))
plt.show()
| [
"[email protected]"
] | |
0dd949bf525d884fb0afce61ba4dc913e5040fd5 | c9b1460e5620bb37aa3b087a25395aa722438bf2 | /decorators.py | f8985725ea12d357b3f708bde59dfa6412b1f58d | [] | no_license | 11Nithya/Python_Training_task | cdddc9c046aa7e37a2bf838beaea42633a8e27d9 | 1262caaec0848925a793da50b3fbc32901a6f729 | refs/heads/main | 2023-05-09T11:12:27.247617 | 2021-05-18T07:08:39 | 2021-05-18T07:08:39 | 346,230,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | #Explain what are all the use case of Decorators, How control flows in decorator invoke, Code Examples of use cases
Def: A decorator is a function that adds extra functionality to existing code without changing that code
Eg: Time decorator
import time

def _time(func):
def wrapper(*args ,**kwargs):
start=time.time()
result=func(*args,**kwargs)
end= time.time()
return result
return wrapper
@_time
def add(a,b):
print(a+b)
output console
add(1,2)
3
explanation: when Python sees @_time above the definition of add, it calls the outer function (_time) with add
and rebinds the name add to the inner wrapper function that _time returns. The outer function's only job is to
build and return that wrapper, and the wrapper keeps a reference to the original add function.
When add(1, 2) is called it is really wrapper that runs: it notes the start time, calls the original add
(which displays the output 3), notes the end time, and returns the result.
The wrapper accepts *args and **kwargs, which means we can pass a variable number of positional arguments
and a variable number of keyword arguments.
| [
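The @ syntax is only shorthand for passing the function to the decorator by hand, and the same pattern covers
other use cases such as logging or access control. The sketch below is a minimal illustration of both points;
the names log_call, greet and sub are made-up examples, not part of the notes above:

def log_call(func):
    # second use case: log every call together with its arguments
    def wrapper(*args, **kwargs):
        print("calling", func.__name__, "with", args, kwargs)
        return func(*args, **kwargs)
    return wrapper

@log_call
def greet(name):
    print("hello", name)

def sub(a, b):
    print(a - b)

# decorating with @log_call is equivalent to the manual assignment below,
# just as @_time above is equivalent to writing add = _time(add)
sub = log_call(sub)
greet("world")   # prints the log line, then: hello world
sub(5, 2)        # prints the log line, then: 3

Because wrapper simply forwards *args and **kwargs, the same decorator can be applied to functions with any
number of positional and keyword arguments.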
"[email protected]"
] | |
2358df38a615ac477179f9b02b827d6a91d6cba4 | 45b2504be9214e1b4e25aa41b9043ffa808215fa | /blog/blog/urls.py | 4325a6851fc976aebac6b58c669670f3676f81fe | [] | no_license | jai-singhal/pyclub | b53a9d2ac1f5f2e8332d242c681700e67af20afc | a7d1eca85e225d9d852c126bfe58da233ba1e363 | refs/heads/master | 2021-07-09T09:18:49.653342 | 2017-10-06T17:27:42 | 2017-10-06T17:27:42 | 106,029,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from posts.views import my_view
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^home/', my_view, name = "my_view")
]
| [
"="
] | = |
321e86f6cf9ddfe5c8462d2bcd3d5a5937364cdf | f086cbd31a55226ba4d04545abeca0953fffa6af | /build.py | 02942ef81a552c4a1795df0a2c55fc13e79404db | [] | no_license | kyungjejo/exprgram__old | 603f8c245a1364a6c29fff8c48e78e15862c1b5b | 8f29527c0a319f0e427d48077c50334cf7524aa6 | refs/heads/master | 2022-12-14T09:59:24.137871 | 2018-08-03T10:56:41 | 2018-08-03T10:56:41 | 139,372,160 | 0 | 0 | null | 2022-12-07T23:51:49 | 2018-07-02T00:18:28 | JavaScript | UTF-8 | Python | false | false | 1,371 | py | import subprocess, re, os
# move build folder from exprgram-front to backend
# subprocess.call("rm ./backend/build")
PATH = os.getcwd()
subprocess.call("cp -rf %s/exprgram-front/build %s/backend/" %(PATH,PATH), shell=True)
#
# subprocess.call("python ./backend/manage.py collectstatic", shell=True)
subprocess.call("mv ./backend/build/manifest.json ./backend/static", shell=True)
with open("./backend/build/index.html") as text:
html = "{% load static %}"+text.read()
html = re.sub('/manifest.json', "{% static 'manifest.json' %}", html)
css = re.compile("/static/css/main.*.css").search(html).group().replace("/static/",'')
new_css = "{% static '"+css+"' %}"
js = re.compile("/static/js/main.*.js").search(html).group().replace("/static/",'')
new_js = "{% static '"+js+"' %}"
html = re.sub("/static/css/main.*.css",new_css,html)
html = re.sub("/static/js/main.*.js",new_js,html)
with open("./backend/build/index.html",'w') as text:
text.write(html)
with open('./backend/build/static/'+css) as text:
new_css = text.read()
url = re.findall("url\((/static/media.*?)\)", new_css)
for idx,u in enumerate(url):
_u = u.strip("/static/")
_u = "{% static "+_u+" %}"
new_css = re.sub(u, _u, new_css)
with open('./backend/build/static/'+css, 'w') as text:
text.write("{% load static %}"+new_css)
| [
"[email protected]"
] | |
09fe65322acf3ae806998d7a71fd25ea26018983 | b639a3fbda373b0efb5e6eac6ca682f0be789cb0 | /modules/worker.py | 0ecc62afc41e4590ca2cb0771dffab0e6248b2c9 | [
"MIT"
] | permissive | Dinxor/tstore | 98b497c873124c492a9b35ff1f87df218e4d2bfd | ff2bb229ad2169926046076022b5a37025e98877 | refs/heads/main | 2023-08-07T03:38:40.645754 | 2021-10-02T05:56:44 | 2021-10-02T05:56:44 | 412,704,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from time import sleep
def worker(tt):
name = 'worker'
sleep(1)
cnt = 0
source = tt[name].get('queue')
while 1:
if not tt[name].get('is_working', False) and tt[name].get('is_enable', False):
tt[name].update({'is_working':True})
elif tt[name].get('is_working', False) and not tt[name].get('is_enable', False):
tt[name].update({'is_working':False})
while not source.empty():
new_data = source.get(block=True)
if tt[name].get('is_enable', False):
print(new_data)
cnt +=1
else:
pass
tt[name].update({'cnt':cnt})
sleep(1)
| [
"[email protected]"
] | |
3dd993f6f03823131322acc17b971c0a91733cae | 26938c6646e7a645037c037a3361633f568cdd6d | /src/DSSCDB/cron_wsgi.py | aab7ab613d311980cb7f9ff05d13041531d122ed | [
"AFL-3.0"
] | permissive | Elsenety/DSSCDB | 4f2fc9533d82de2e1d65c155e086ad2d7e4791a0 | 6631110c1bb477d45eab9c15324826958cd61ed6 | refs/heads/master | 2023-04-18T08:19:46.391491 | 2018-06-11T21:47:23 | 2018-06-11T21:47:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | import os
from django.core.management import call_command
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DSSCDB.settings")
application = get_wsgi_application()
from django.conf import settings
try:
import uwsgidecorators
@uwsgidecorators.timer(60)
def send_queued_mail(num):
"""Send queued mail every 60 seconds"""
call_command('send_queued_mail', processes=1)
@uwsgidecorators.cron(4, 4, -1, -1, 1)
def send_database_backup_email(num):
call_command('send_database_backup', **{'--backup_password': settings.DATABASE_EMAIL_PASSWORD})
except ImportError:
print("uwsgidecorators not found. Cron and timers are disabled")
| [
"[email protected]"
] | |
e48aaced021300f583f462fe3ef463ac9d0e8899 | 039b316064097dddce158adfd66684eb8c573760 | /NumberClassificationPerceptron/constantsNegative.py | 47b5fccf30e14394e384717a30b3d1cdd0aeeb72 | [] | no_license | Akshat-Raj-Vansh/neural-networks-classification | df7c1fe7a7a09f923a5824f29d1d7e8a0f56e4d2 | 0e2fb6d86ff738d66015de86a1617153945cc91d | refs/heads/main | 2023-07-26T11:20:14.625710 | 2021-09-06T10:35:52 | 2021-09-06T10:35:52 | 403,593,374 | 0 | 0 | null | 2021-09-06T11:16:36 | 2021-09-06T11:16:35 | null | UTF-8 | Python | false | false | 1,453 | py | #-1 : 111 1-11 1-11 1-11 111
#1 : -11-1 -11-1 -11-1 -11-1 -11-1
#2 : 111 -1-11 111 1-1-1 111
#3 : 111 -1-11 111 -1-11 111
#4 : 1-11 1-11 111 -1-11 -1-11
#5 : 111 1-1-1 111 -1-11 111
#6 : 111 1-1-1 111 1-11 111
#7 : 111 -1-11 -1-11 -1-11 -1-11
#8 : 111 1-11 111 1-11 111
#9 : 111 1-11 111 -1-11 111
import numpy as np
input_vector = np.array([[1,1,1,1,-1,1,1,-1,1,1,-1,1,1,1,1],
[-1,1,-1,-1,1,-1,-1,1,-1,-1,1,-1,-1,1,-1],
[1,1,1,-1,-1,1,1,1,1,1,-1,-1,1,1,1],
[1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,1,1],
[1,-1,1,1,-1,1,1,1,1,-1,-1,1,-1,-1,1],
[1,1,1,1,-1,-1,1,1,1,-1,-1,1,1,1,1],
[1,1,1,1,-1,-1,1,1,1,1,-1,1,1,1,1],
[1,1,1,-1,-1,1,-1,-1,1,-1,-1,1,-1,-1,1],
[1,1,1,1,-1,1,1,1,1,1,-1,1,1,1,1],
[1,1,1,1,-1,1,1,1,1,-1,-1,1,1,1,1]])
# 1-1 X 15
m,x = input_vector.shape
target_vector = np.array([[-1,-1,-1,-1],
[-1,-1,-1,1],
[-1,-1,1,-1],
[-1,-1,1,1],
[-1,1,-1,-1],
[-1,1,-1,1],
[-1,1,1,-1],
[-1,1,1,1],
[1,-1,-1,-1],
[1,-1,-1,1]])
# 1-1 X 4
n,t = target_vector.shape
input_nodes = 15
output_nodes = 4 | [
"[email protected]"
] | |
fae9fc8b3c6d53c95d9785dffe79b4025eb110d9 | cab84979234c907896275290fdfcbe6d6db7857d | /src/api_time_set/migrations/0001_initial.py | 14b7184903365c8e16af300b3bdd0e6fb2ff99be | [] | no_license | ntdo2506/Football_pitching | 80eea36fa48ee060b795146fbea64f81e6875f15 | b344d3fd38bf1777381db9973a40550fda3adf94 | refs/heads/master | 2023-04-07T14:04:41.558422 | 2021-03-01T12:46:32 | 2021-03-01T12:46:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Generated by Django 3.1.2 on 2020-10-05 15:48
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TimeSetModels',
fields=[
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('updated_at', models.DateTimeField(default=django.utils.timezone.now)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('start_time', models.DateTimeField()),
('end_time', models.DateTimeField()),
('duration', models.IntegerField()),
],
options={
'db_table': 'fb_time_set',
},
),
]
| [
"[email protected]"
] | |
4dafe2fa7f0fdf91b426d21f9eba0438c236c084 | 966108cddefb96ea53b579483afe54f60dfd174d | /args.py | fada44f45b6497efcd9785b63c1db7251a2be7d3 | [] | no_license | adelaidehsu/Personalized-Dialogue-Response-Generation-from-Monologues | 9f9aaa32f2e5a476be855752c474e91690d71166 | 85f1b9596cfb6abfe5a0ab803ff860203579de15 | refs/heads/master | 2021-10-25T19:33:52.437441 | 2019-10-21T07:45:01 | 2019-10-21T07:45:01 | 188,363,218 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,576 | py | import argparse
def parse():
parser = argparse.ArgumentParser(
description='You have to set the parameters for seq2seq, \
including both maximum likelihood estimation and \
generative adversarial learning.')
parser.add_argument("--file-head", type=str, default='None')
parser.add_argument("--pre-model-dir", type=str, default='None')
parser.add_argument("--pre-D-model-dir", type=str, default='None')
parser.add_argument("--model-dir", type=str, default='results/gan')
parser.add_argument("--pretrain-dir", type=str, default='results/pretrain')
parser.add_argument("--gan-dir", type=str, default='results/gan')
parser.add_argument("--glove-model", type=str, default='glove_model/corpus_op+fri')
parser.add_argument("--data-dir", type=str, default='data/')
parser.add_argument("--data-path", type=str, default='data/opensubtitles.txt')
parser.add_argument("--feature-path", type=str, default='data/feature.txt')
parser.add_argument("--feature-size", type=int, default=6)
parser.add_argument("--train-path", type=str, default='data/friends.txt')
parser.add_argument("--test-path", type=str, default='data/friends.txt')
parser.add_argument("--steps-per-checkpoint", type=int, default=200)
parser.add_argument("--lambda-one", type=float, default=0.5)
parser.add_argument("--lambda-two", type=float, default=0.5)
parser.add_argument("--lambda-dis", type=float, default=0.5)
parser.add_argument("--baseline", type=float, default=1.5)
parser.add_argument("--iteration", type=int, default=5000)
parser.add_argument("--Dstep", type=int, default=5)
parser.add_argument("--Gstep", type=int, default=1)
# s2s: for encoder and decoder
parser.add_argument("--size", type=int, default=256)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument("--lr", type=float, default=0.5)
parser.add_argument("--lr-decay", type=float, default=0.99)
parser.add_argument("--grad-norm", type=float, default=5.0)
parser.add_argument("--use-attn", type=bool, default=False)
parser.add_argument("--vocab-size", type=int, default=20000)
parser.add_argument("--output-sample", type=bool, default=False)
parser.add_argument("--input_embed", type=bool, default=True)
# s2s: training setting
parser.add_argument("--buckets", type=str, default='[(10, 5)]')
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--max-seq-len", type=int, default=30)
parser.add_argument("--max-train-data-size", type=int, default=0) # 0: no limit
# for value function
parser.add_argument("--v-lr", type=float, default=1e-4)
parser.add_argument("--v-lr-decay-factor", type=float, default=0.99)
# gan
parser.add_argument("--D-lr", type=float, default=1e-4)
parser.add_argument("--D-lr-decay-factor", type=float, default=0.99)
parser.add_argument("--gan-type", type=str, default='None')
parser.add_argument("--gan-size", type=int)
parser.add_argument("--gan-num-layers", type=int)
parser.add_argument("--G-step", type=int)
parser.add_argument("--D-step", type=int)
parser.add_argument("--option", type=str, default='None')
# test
parser.add_argument("--test-type", type=str, default='accuracy')
parser.add_argument("--test-critic", type=str, default='None')
parser.add_argument("--test-data", type=str, default='None')
parser.add_argument("--test-fout", type=str, default='None')
return parser.parse_args()
| [
"[email protected]"
] | |
99ecf43ecf798e941c912684222865758bf4ac30 | 37279a6b70fd432d96087f2154ded529ffbc0c9e | /dynamic_programming/1143-Longest-Common-Subsequence.py | 19ae64fa6848196a1a789fa9fc58963e6ac563b1 | [] | no_license | abhishek-jana/Leetcode-Solutions | cfe1bad64fda2421ba85f23121ca50ffc59357da | 9cd5d12b7438c646226a5e174571e3dbf339a179 | refs/heads/master | 2020-12-02T02:04:41.784082 | 2020-01-22T13:07:13 | 2020-01-22T13:07:13 | 230,852,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | '''
Given two strings text1 and text2, return the length of their longest common subsequence.
A subsequence of a string is a new string generated from the original string with some characters(can be none) deleted without changing the relative order of the remaining characters. (eg, "ace" is a subsequence of "abcde" while "aec" is not). A common subsequence of two strings is a subsequence that is common to both strings.
If there is no common subsequence, return 0.
Example 1:
Input: text1 = "abcde", text2 = "ace"
Output: 3
Explanation: The longest common subsequence is "ace" and its length is 3.
Example 2:
Input: text1 = "abc", text2 = "abc"
Output: 3
Explanation: The longest common subsequence is "abc" and its length is 3.
Example 3:
Input: text1 = "abc", text2 = "def"
Output: 0
Explanation: There is no such common subsequence, so the result is 0.
'''
#https://www.geeksforgeeks.org/longest-common-subsequence-dp-4/
# A Naive recursive Python implementation of LCS problem
def lcs(X, Y, m, n):
if m == 0 or n == 0:
return 0;
elif X[m-1] == Y[n-1]:
return 1 + lcs(X, Y, m-1, n-1);
else:
return max(lcs(X, Y, m, n-1), lcs(X, Y, m-1, n));
# Driver program to test the above function
X = "AGGTAB"
Y = "GXTXAYB"
print "Length of LCS is ", lcs(X, Y, len(X), len(Y))
# Dynamic Programming implementation of LCS problem
def lcs(X, Y):
# find the length of the strings
m = len(X)
n = len(Y)
# declaring the array for storing the dp values
L = [[None]*(n + 1) for i in range(m + 1)]
"""Following steps build L[m + 1][n + 1] in bottom up fashion
Note: L[i][j] contains length of LCS of X[0..i-1]
and Y[0..j-1]"""
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0 :
L[i][j] = 0
elif X[i-1] == Y[j-1]:
L[i][j] = L[i-1][j-1]+1
else:
L[i][j] = max(L[i-1][j], L[i][j-1])
# L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]
return L[m][n]
# end of function lcs
# Driver program to test the above function
X = "AGGTAB"
Y = "GXTXAYB"
print "Length of LCS is ", lcs(X, Y)
| [
"[email protected]"
] | |
9693eb29e019a30e3f90532099f41b13e8b5c7c6 | 5d4bf79b0eabb6cd3a659f14d1d8471956a32a85 | /pyBrisbane.py | 20b8aa2d7f3d8ede3f12a6e17c17304f7df6222a | [] | no_license | crystalmanner/scrapy_source | 8c08d1b4aafc16df58d647ad308506e252a6c0d5 | 5cbdca543bc05a40942c801334f45e4b27555725 | refs/heads/master | 2023-02-21T10:33:00.857451 | 2021-01-21T06:58:01 | 2021-01-21T06:58:01 | 331,528,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | import pyMsql
import gsheet
import json
import re
def get_price(cloth_pattern_number):
ucpn = str(cloth_pattern_number).replace(" ","").upper()
if 'Chiltern'.upper() in ucpn: return 6.95
elif 'Cotswold'.upper() in ucpn:return 8.25
elif '530' in ucpn: return 7.25
elif '531' in ucpn: return 7.25
elif 'Coniston'.upper() in ucpn or '568' in ucpn: return 7.95
elif '3101X' in ucpn: return 11.25
elif 'GS20' in ucpn: return 17.50
elif '3124' in ucpn: return 10.95
elif 'T1' in ucpn: return 9.75
elif 'Yew'.upper() in ucpn: return 8.40
elif "Sycamore".upper() in ucpn: return 8.95
elif '3103' in ucpn: return 7.75
elif 'Calder' in ucpn: return 8.75
elif 'M1A' in ucpn: return 10.25
elif 'M1B' in ucpn: return 9.75
elif 'M25' in ucpn: return 7.95
elif '52' in ucpn: return 7.45
elif 'Haworth'.upper() in ucpn: return 6.95
elif 'Madrid'.upper() in ucpn: return 7.95
elif 'Napoli'.upper() in ucpn: return 8.95
elif 'Shakespeare'.upper() in ucpn: return 6.50
elif 'Turner'.upper() in ucpn: return 6.25
elif 'Tennyson'.upper() in ucpn: return 7.45
elif 'Keats'.upper() in ucpn or 'Wordsworth'.upper() in ucpn: return 5.95
elif 'Blake'.upper() in ucpn: return 6.50
elif 'Rye'.upper() in ucpn: return 4.95
elif 'Byron'.upper() in ucpn: return 4.25
elif 'Dreem'.upper() in ucpn: return 5.45
elif 'Milton'.upper() in ucpn: return 4.50
elif 'Bronte'.upper() in ucpn: return 7.45
elif 'Montecarlo'.upper() in ucpn: return 6.85
elif 'Aztec'.upper() in ucpn: return 5.50
elif 'Pique'.upper() in ucpn: return 6.25
elif 'Althorp'.upper() in ucpn: return 18.75
elif 'Sherbourne'.upper() in ucpn: return 15.5
elif 'Osborne'.upper() in ucpn: return 23.45
elif 'Stowe'.upper() in ucpn: return 21
else:
print("non exist price list ",ucpn )
return 0
return
def brisbane_main():
new_list = []
with open("brisbanMoss_new.json", "r") as f:
new_list = json.load(f)
for item in new_list:
print(item['cloth_pattern_number'])
write_data = {
'cloth_pattern_number': str(item['cloth_pattern_number']).upper(),
'image_url': item['image_url'],
'cloth_bunch': item['cloth_bunch'],
'composition_1': item['compostion1'] if 'compostion1' in item else "",
'supplier_name': 'Brisban Moss',
"weight_gms": item['weight'] if 'weight' in item else "",
"design": item['design'] if 'design' in item else "",
'colour': item['colour'] if 'colour' in item else "",
"width": item['width'] if 'width' in item else "",
"weight_ozs": "",
"selvedge": "",
"dye": "",
"weave": item['weave'] if 'weave' in item else "",
"price_per_meter": float(get_price(item['cloth_pattern_number']))}
try:
pyMsql.save_scabal(write_data)
except Exception as e:
print("save data error", e, item['cloth_pattern_number'])
return
if __name__ == '__main__':
brisbane_main() | [
"[email protected]"
] | |
a3c3810d6ff9529ce543b09f87cac82f6a091422 | 7b772acb8daf788f44191becb32accfee866a06c | /nociones_basicas/indices_slicing.py | 44337fbfbef05e573a56885bfff0b4e07bb96640 | [] | no_license | jeysonrgxd/python | 5793b912f5c0fe1929fdba40b9a4c50ecb49fbad | f3ac045e6ef4862b24da7ff16f5e195cf0bfb7f0 | refs/heads/master | 2023-08-15T22:16:51.767219 | 2021-10-23T17:48:41 | 2021-10-23T17:48:41 | 258,714,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | palabra = "python"
'''
| P | Y | T | H | O | N |
------>
| 0 | 1 | 2 | 3 | 4 | 5 |
<------
| -6 | -5 | -4 | -3 | -2 | -1 |
'''
print(palabra[0])
print(palabra[1])
print(palabra[2])
print(palabra[3])
print(palabra[4])
print(palabra[5])
print("\n")
print(palabra[-6])
print(palabra[-5])
print(palabra[-4])
print(palabra[-3])
print(palabra[-2])
print(palabra[-1])
# Slicing
print('''\n SLICING ''')
print(palabra[:]) # prints the whole word: python
print(palabra[1:]) # prints ython, i.e. from the position we specify onwards
print(palabra[:5]) # prints pytho; the last index specified is not included, "remember that"
print(palabra[:99]) # prints everything, because the index we gave exceeds the normal length of the word
print(palabra[99:]) # prints nothing, because the start position we gave exceeds the length, so there is nothing to print from there
print(palabra[:-2]) # prints up to the index we specify, excluding it
print(palabra[-2:])# prints from the specified index onwards
'''
Here we build a concatenation, that is, we create another word using the one we already have.
len(palabra) is a function that tells us how many elements make up the string.
'''
concatenacion1 = "T"+palabra[1:len(palabra)]
concatenacion2 = "T"+palabra[1:6]
print(concatenacion1)
print(concatenacion2) | [
"[email protected]"
] | |
5928531477adae181ea7e8284d8747a773a140c6 | 622b9e5f5c58b746574898dda24d4eb992fdc78b | /app/utils/file.py | d03b18fe992083f7e9e862e1f6e03d01829bf98a | [] | no_license | chajeehyung/hackathon-finish | 9953b403e2434c55c31238642b67c8caad950c11 | 3c8b9235ff8e76b4950e6282d8ab5a911c316af6 | refs/heads/master | 2022-12-10T07:32:54.939835 | 2018-11-16T10:16:24 | 2018-11-16T10:16:24 | 157,850,240 | 0 | 0 | null | 2022-12-08T01:17:46 | 2018-11-16T10:15:33 | Jupyter Notebook | UTF-8 | Python | false | false | 536 | py | import urllib.request
# from io import BytesIO
#
# import magic
# import requests
#
# # url로부터 파일을 임시 다운로드
# def download(url):
# response = requests.get(url)
# binary_data = response.content
# temp_file = BytesIO()
# temp_file.write(binary_data)
# temp_file.seek(0)
# return temp_file
#
# # 파일 확장자 추출
# def get_buffer_ext(buffer):
# buffer.seek(0)
# mime_info = magic.from_buffer(buffer.read(), mime=True)
# buffer.seek(0)
# return mime_info.split('/')[-1] | [
"[email protected]"
] | |
4d0dc9942b5c1604a67ef352576e93d48ef56627 | 22319631f2027c3bfa724c283bf7762992336b5b | /wsgi.py | b4080b9296ea0f3fa14dfb463816e253ff9b7db3 | [] | no_license | rodolfolotte/webgis | eefcb7912ae40d3ae353c154ad08d5fe67f75e15 | e27d1d14f08262ea37de6079abc3e7dd12c74bb6 | refs/heads/master | 2022-12-03T02:04:51.409836 | 2020-08-19T17:50:24 | 2020-08-19T17:50:24 | 288,797,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for webgis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webgis.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
ee126199d7cd2366e17798ecf8a532b5b6192bb8 | 1b83e95009f7e883c9da4ec50aa19f9902ccc9ab | /python/dazl/cli/sandbox.py | 0b46a18f9aa01aa4038d3d51f011f02545301360 | [
"Apache-2.0"
] | permissive | ballon3/dazl-client | 327ec6d2a3acbd3748fd03766d93acb2699a7805 | baa9eb975cac26a000fed076f3d1c23569068f5b | refs/heads/master | 2021-02-27T06:29:45.046458 | 2020-02-11T22:03:17 | 2020-02-11T22:03:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | # Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from asyncio import get_event_loop
from argparse import ArgumentParser
from .. import sandbox
from ..util.dar import DamlcPackageError
from ._base import CliCommand
class SandboxCommand(CliCommand):
name = 'sandbox'
hidden = True
def parser(self):
arg_parser = ArgumentParser('dazl sandbox')
arg_parser.add_argument(
'file', metavar='FILE', help='A .daml file to compile into a package, or a .dar file')
arg_parser.add_argument(
'--port', metavar='PORT', default=7600, type=int)
arg_parser.add_argument(
'--log-level')
return arg_parser
def execute(self, args):
try:
with sandbox(args.file, args.port):
get_event_loop().run_forever()
except DamlcPackageError as ex:
return ex.exit_code
| [
"[email protected]"
] | |
8b61f441f981bd4050e418c7940bb4153e5c05ff | 1f813c3cd6a9d293acfbc81f198c64f816a9a95d | /devel/.private/pkg_ros_iot_bridge/lib/python2.7/dist-packages/pkg_ros_iot_bridge/msg/_msgRosIotActionResult.py | f1b43f02572883f9b6a4854923a6d4c6c3dfc006 | [] | no_license | koteshrv/Vargi_Bots_1418 | 5ada79746785a9f9cc0e1d686a1dd2702c9e0f0f | 6bcf843c7150c93caee2b596e0864749c51b6155 | refs/heads/main | 2023-01-06T03:42:53.555701 | 2020-11-08T17:36:59 | 2020-11-08T17:36:59 | 302,916,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,021 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pkg_ros_iot_bridge/msgRosIotActionResult.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import actionlib_msgs.msg
import genpy
import pkg_ros_iot_bridge.msg
import std_msgs.msg
class msgRosIotActionResult(genpy.Message):
_md5sum = "9947ff0cda32e84a022d3d6978fa6c68"
_type = "pkg_ros_iot_bridge/msgRosIotActionResult"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
msgRosIotResult result
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: pkg_ros_iot_bridge/msgRosIotResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# result
int8 final_x
int8 final_y
int8 final_theta
"""
__slots__ = ['header','status','result']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','pkg_ros_iot_bridge/msgRosIotResult']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,result
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(msgRosIotActionResult, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = pkg_ros_iot_bridge.msg.msgRosIotResult()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.result = pkg_ros_iot_bridge.msg.msgRosIotResult()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_3b().pack(_x.result.final_x, _x.result.final_y, _x.result.final_theta))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = pkg_ros_iot_bridge.msg.msgRosIotResult()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.text = str[start:end]
_x = self
start = end
end += 3
(_x.result.final_x, _x.result.final_y, _x.result.final_theta,) = _get_struct_3b().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_3b().pack(_x.result.final_x, _x.result.final_y, _x.result.final_theta))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = pkg_ros_iot_bridge.msg.msgRosIotResult()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.text = str[start:end]
_x = self
start = end
end += 3
(_x.result.final_x, _x.result.final_y, _x.result.final_theta,) = _get_struct_3b().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_3b = None
def _get_struct_3b():
global _struct_3b
if _struct_3b is None:
_struct_3b = struct.Struct("<3b")
return _struct_3b
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
| [
"[email protected]"
] | |
00cb52e288e09752cd4ee585a1331f3674043452 | d3b3a68db7d70f85a72d30818ee198bb52f5720a | /venv/bin/pip | f0b1636348c46fac420d468426f2a9f1afd58c5f | [] | no_license | Cloudxtreme/billing-7 | 70cef72acb5979395148e50974f0bddd8f9927bc | 484b18735c38da2010b7fc98451db5abecf616a3 | refs/heads/master | 2021-05-28T22:54:49.313520 | 2015-07-03T03:58:42 | 2015-07-03T03:58:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | #!/Users/nlaird/git/billing/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
285229b76942c122d76afab9973196c4ef8e92cf | 9f613a4cb3496be67ff8e8975f8f2af6ad3823d8 | /DFT.py | 02aabbafe38f2938587b6ba3bbda6c408ca543a9 | [] | no_license | anguyen216/blob-detection | 902156a62e6960015e2c9bd60f3de379e98a5cba | 3ad952655e693f65321a70c0d0ca1cc1f3478b04 | refs/heads/master | 2022-12-14T10:43:58.046624 | 2020-09-11T00:41:06 | 2020-09-11T00:41:06 | 294,551,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | #!/usr/bin/env python3
import numpy as np
def DFT2(image):
"""
    Implementation of the 2D DFT using numpy's built-in 1-D FFT,
    applied first along rows and then along columns
    Input:
    - image: 2-D array (grayscale image) to be transformed
    Output:
    - 2-D matrix of the Fourier transform of the original image
"""
# only take grayscale image or 2D inputs
assert(np.ndim(image) == 2)
M, N = image.shape
res = np.zeros((M,N), dtype=np.complex128)
# transform rows then columns
for i in range(M):
res[i,:] = np.fft.fft(image[i,:])
for i in range(N):
res[:,i] = np.fft.fft(res[:,i])
return res
def IDFT2(F):
"""
    Implementation of the 2D IDFT using the DFT2 function above.
    This is done by feeding the conjugate of F into DFT2, dividing
    the result by M*N, and conjugating the result again
    Input:
    - F: 2-D signal in the frequency domain
    Output:
    - 2-D image in the spatial domain
"""
# only take 2-D inputs
assert(np.ndim(F) == 2)
M, N = F.shape
res = DFT2(F.conjugate()) / (M * N)
res = res.conjugate()
return res.real
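# Quick sanity check (illustrative sketch; any small real-valued array works,
# numpy is already imported above as np):
#
#   img = np.random.rand(8, 8)
#   F = DFT2(img)                      # same result as np.fft.fft2(img)
#   assert np.allclose(F, np.fft.fft2(img))
#   assert np.allclose(IDFT2(F), img)  # forward + inverse round-trips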
| [
"[email protected]"
] | |
eb1bf55848c22eab9a285c204849f1f620852fed | 6c4f6ce13304b6a440a44dc175b2a1c796988835 | /etc/gunicorn.conf.py | 252518bc9788cf9b3c4f78c27548487998728d8f | [] | no_license | timmyomahony/bitbucket-jekyll-hook | 6e72dc5797177d0d5b5e7aef24987732f5ac55b3 | 92836af4545e72184c9a93fbbe3b1e12c58d2803 | refs/heads/master | 2021-01-19T12:36:27.791591 | 2019-02-26T09:09:30 | 2019-02-26T09:09:30 | 13,768,277 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | bind = "127.0.0.1:50100"
accesslog = "/path/to/virtualenv/logs/gunicorn/access.log"
errorlog = "/path/to/virtualenv/logs/gunicorn/errors.log"
workers = 1
| [
"[email protected]"
] | |
f5ff027f63b9767fc250dafaf13777bd3b9f2eca | 0473079081af2d3490ba96313914940e00c3eb2e | /RLcode/run_lander.py | 473d41bce17cca17f2777c50ff9c92116d116e95 | [] | no_license | sandra-93/Reinforcement_learning_project | 070a9fe1ca7100c983e9999952bffcd53089ebea | bac0ecded78dbba25b2964b39dd20ce54b5fddfa | refs/heads/master | 2020-12-29T10:18:28.938665 | 2020-02-03T00:04:53 | 2020-02-03T00:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | import linear_policy
import nn_policy
import random_agent
import gym
import sys
def run_lunar_lander(env, num_episodes, policy_function):
"""
runs the lunar lander environment for a specific number of episodes
with a specified policy function.
"""
rewards = []
for episode in range(num_episodes):
observation = env.reset()
episode_reward = 0
while True:
action = policy_function(observation)
observation, reward, done, info = env.step(action)
# You can comment the below line for faster execution
env.render()
episode_reward += reward
if done:
print('Episode: {} Reward: {}'.format(episode, episode_reward))
rewards.append(episode_reward)
break
print('Average reward: %.2f' % (sum(rewards) / len(rewards)))
if __name__=="__main__":
## load environment
env = gym.make('LunarLanderContinuous-v2')
## choose a policy
policy = random_agent.get_action
if len(sys.argv) > 1:
run_lunar_lander(env, num_episodes=int(sys.argv[1]), policy_function = policy)
else:
run_lunar_lander(env, num_episodes=100, policy_function = policy)
| [
"[email protected]"
] | |
3cddee9c805ccbe332f83e058515fb0d2098e8de | bb285aa0fa9203ab07c092c47ff5470fa5034ee9 | /myEnv/bin/chardetect3 | d4c9ac50d31e65d01e518921aa3858d4cf1b4cf4 | [] | no_license | surathjhakal/programming-voting-app | eac7871942ab9ebe488d598ead9692a0b78bacf4 | ed4bd8c517dea889c5ec9240d7151dbc469c5fc7 | refs/heads/master | 2023-04-15T05:35:10.491455 | 2023-04-08T09:24:15 | 2023-04-08T09:24:15 | 339,997,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | #!/home/surath/Desktop/myWork/django-folder/djangoProjects/myEnv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
7dcadc05edd75d419a5e1e3209018e6647099169 | 1756a97999baa79b2ab066ea548b631ec61506a5 | /cs399_Theater/main/migrations/0001_initial.py | 4e79b19524c09726186cfbf6275bd2921aab601c | [] | no_license | sk367/cs399_Theater | b8dd84a05f3521ac69337bf2f789b2826930791f | 2c1c47b97d5e2db409d009d85c7334ebe204ea74 | refs/heads/master | 2016-09-05T11:24:51.074328 | 2015-02-06T15:49:40 | 2015-02-06T15:49:40 | 29,934,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Events',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'', max_length=128)),
('date', models.DateTimeField()),
('cost', models.DecimalField(max_digits=3, decimal_places=2)),
('genre', models.CharField(default=b'folk', max_length=50)),
('description', models.TextField(default=b'This band plays music')),
],
options={
},
bases=(models.Model,),
),
]
| [
"[email protected]"
] | |
b0dbd6c7e93b2d90b29a38a86057e7e9f6e8d77b | cbbf17379c05ece1bab8a128c03bdca53e74c3e2 | /convolution.py | e137218cd61b457aab775e77d3bb8eac7fd7d99a | [] | no_license | pooplar/Bio2Vec | f75558ad65165fb5cf33976c87324409b56df5ec | a5f781f81169909d3a6716e6e7988d3c0c1e4ca4 | refs/heads/master | 2023-09-02T19:55:51.091149 | 2021-10-21T03:14:19 | 2021-10-21T03:14:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,503 | py | '''
1. Load the data and split it into x_train, y_train, x_test, y_test.
2. Forward propagation.
3. Back propagation.
4. Train the network.
'''
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Input, Dense, Flatten, Dropout,concatenate
from keras.layers.advanced_activations import PReLU
from keras.callbacks import ModelCheckpoint, TensorBoard,EarlyStopping
from util import make_data
# from loading_data import load_data
from keras.models import Model
from sklearn.model_selection import train_test_split
import pickle
import matplotlib.pyplot as plt
from keras.utils import plot_model
import datetime
print("start",datetime.datetime.now())
# define parameters
patience=50
# model_save_path='model/'+"patience_"+str(patience)+"_human_windows_size_3_model"
model_save_path='model/'+"patience_"+str(patience)+"_human_test_2048dim_20000unigram"
'''data'''
max_feature =4096
batch_size = 64
'''convolution layer'''
filters = 64
kernel_size = 2
pool_size = 2
strides = 1
log_dir = 'log_dir/'
acc_loss_file = 'images/human2_concatenate_cnn_acc_loss_sg__human_test_2048dim_20000unigram'
# num_classes = 2
epochs =50
# get data
x, y = make_data()
# expected input data shape: (batch_size, timesteps, data_dim)
x = x.reshape(-1, 1, max_feature)
# split data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=64)
# init = initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=64)
digit_input = Input(shape=(1, max_feature))
x = Conv1D(filters, kernel_size, padding="same")(digit_input)
x = Conv1D(filters, kernel_size, padding="same", )(x)
x = Dropout(0.5)(x)
x=PReLU()(x)
x = MaxPooling1D(pool_size=pool_size, strides=strides, padding="same")(x)
y=Conv1D(32, kernel_size, padding="same")(digit_input)
y = Dropout(0.5)(y)
y=PReLU()(y)
y = MaxPooling1D(pool_size=pool_size, strides=strides, padding="same")(y)
k=Conv1D(128, kernel_size, padding="same")(digit_input)
k = Dropout(0.5)(k)
k=PReLU()(k)
k= MaxPooling1D(pool_size=pool_size, strides=strides, padding="same")(k)
z=concatenate([x,y,k])
z = Flatten()(z)
# x = Flatten()(x)
# x = GRU(gru_output_size, dropout=0.5, recurrent_dropout=0.5)(x)
out = Dense(1, activation='sigmoid')(z)
model = Model(digit_input, out)
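# Architecture recap of the model assembled above: three parallel Conv1D branches read
# the same (1, 4096) input (a two-layer 64-filter branch, a 32-filter branch and a
# 128-filter branch, each followed by dropout, PReLU and max pooling); their outputs
# are concatenated, flattened and fed to a single sigmoid unit for binary classification.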
model.summary()
print('Compiling the Model...')
model.compile(loss='binary_crossentropy',
optimizer='nadam',
metrics=['accuracy'])
print("Train...")
# store checkpoint file every 5 steps
# checkpointer = ModelCheckpoint(filepath="model/"+"patience_"+str(patience)+"human_test_64dim_20000unigram_"+"weights.{epoch:02d}.hdf5", verbose=1, period=10)
#
# TB = TensorBoard(log_dir=log_dir, write_images=1, histogram_freq=1, write_graph=True)
early_stopping = EarlyStopping(monitor='val_loss',patience=patience)
fit_history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1, shuffle=True,
callbacks=[early_stopping])
'''save final model'''
model.save(model_save_path)
'''plot acc and loss'''
plt.figure(1)
plt.subplot(1, 2, 1)
x = range(1, len(fit_history.history['loss']) + 1)
plt.plot(x, fit_history.history['loss'], 'b', x, fit_history.history['val_loss'], 'r')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.subplot(1, 2, 2)
plt.plot(x, fit_history.history['acc'], 'b', x, fit_history.history['val_acc'], 'r')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(acc_loss_file)
print("Evaluate...")
test_history = model.evaluate(x_test, y_test, batch_size=batch_size)
print('patience: ',patience)
print(model.metrics_names)
print('Test score:', test_history)
print("end",datetime.datetime.now())
# '''
# store Neural Network,include: both graph and weight
# '''
# model.save('model/NN_model.h5')
'''
visualization Neural Network
'''
# plot_model(model, to_file='images/lstm_model.png', show_shapes=True)
'''
store accuracy on matine_Final set and fit_history
'''
# train_acc = fit_history.history['acc']
# train_loss = fit_history.history['loss']
# val_acc = fit_history.history['val_acc']
# val_loss = fit_history.history['val_loss']
# epochs = fit_history.epoch
#
# with open('data/test_set_accuracy.pickle1', mode='wb') as f:
# pickle.dump([test_history, train_acc, train_loss, val_acc, val_loss, epochs], f, -1)
| [
"[email protected]"
] | |
cbb569ecca9d675c16c2e00f858612a271a98327 | d4cbfd493ecc5b9e17ce3c11f303793b3d1f1fae | /Miscellaneous/RemoveDuplicatesFromList.py | 80cbfca933d172227674a63d13cc8f4c997d8c8b | [
"MIT"
] | permissive | ArmenBaghdasaryan14/Algorithms | 45aa18cb26eaa6711ac9aa4b914af905ca61e1b5 | 8581947c8e73d50bb3324377dea7914afc066de6 | refs/heads/master | 2020-12-14T17:06:05.112448 | 2020-02-10T00:37:04 | 2020-02-10T00:37:04 | 234,818,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # Remove duplicates from an unsorted array/list #
# The following method consists of iterating over the list and
# adding each element to a set. By definition a set holds only
# unique values, so duplicates won't be added.
def remove_duplicate(inputList):
filterSet = set()
for i in inputList:
filterSet.add(i)
return list(filterSet)
sampleList = [-2, 2, 5, 5, 10, 11, 32, 100, 111, 3333]
filteredList = remove_duplicate(sampleList)
| [
"[email protected]"
] | |
b9ab2321337dd00ca4e9a93c95c612a4f9be0b0f | 0591ab396b78fa873187e49e0feb873a9c6bbfb9 | /DjangoUeditor/forms.py | 984de90d4fd8045bbf37632a539daf33d2da8053 | [] | no_license | Xiao01/news | eb7daa2afb5308b56439050c401d88e49b3e9750 | 0c63169772f41af855bebd8e3fa408654c013daf | refs/heads/master | 2022-12-08T23:13:00.057529 | 2019-06-24T08:04:52 | 2019-06-24T08:04:52 | 193,461,512 | 0 | 0 | null | 2022-12-08T01:47:09 | 2019-06-24T08:04:12 | JavaScript | UTF-8 | Python | false | false | 1,427 | py | #coding: utf-8
from django import forms
from .widgets import UEditorWidget
from DjangoUeditor.models import UEditorField as ModelUEditorField
class UEditorField(forms.CharField):
    def __init__(self, label, width=600, height=300, toolbars="full", imagePath="",
                 filePath="", upload_settings={}, settings={}, command=None,
                 event_handler=None, *args, **kwargs):
uSettings=locals().copy()
del uSettings["self"],uSettings["label"],uSettings["args"],uSettings["kwargs"]
kwargs["widget"]=UEditorWidget(attrs=uSettings)
kwargs["label"]=label
super(UEditorField,self).__init__( *args, **kwargs)
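# Illustrative usage only (the form and field names below are made up for this example):
#
#   class ArticleForm(UEditorModelForm):
#       content = UEditorField(u'Content', width=800, height=400, toolbars="full",
#                              imagePath="uploads/images/", filePath="uploads/files/")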
def UpdateUploadPath(model_form,model_inst=None):
""" 遍历model字段,如果是UEditorField则需要重新计算路径 """
if model_inst is not None:
try:
for field in model_inst._meta.fields:
if isinstance(field, ModelUEditorField):
model_form.__getitem__(field.name).field.widget.recalc_path(model_inst)
except:
pass
class UEditorModelForm(forms.ModelForm):
def __init__(self,*args,**kwargs):
super(UEditorModelForm,self).__init__(*args,**kwargs)
try:
if kwargs.has_key("instance"):
UpdateUploadPath(self,kwargs["instance"])
else:
UpdateUploadPath(self,None)
except Exception:
pass
| [
"[email protected]"
] | |
019ef2966945369f5a6c2c5b55fe3039f975488f | a4ea411b64409f72cea45f6014afdf08869b9e28 | /ros/benz_navigation/scripts/wheel_odometry_benz_dirimu.py | 81bee863d8e1706578c59666cdf9035abf10b914 | [] | no_license | SiChiTong/TukubaChallenge | 858a168180e9db5d03446e9917ef8d4093319074 | a4b19bdad743cee3b0d6e131ea09db511b03253e | refs/heads/master | 2021-12-26T06:53:00.057330 | 2017-12-12T12:16:07 | 2017-12-12T12:16:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,908 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import roslib; #roslib.load_manifest('wheel_odometry')
import rospy
import time
from std_msgs.msg import *
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TransformStamped
from numpy import *
from tf.transformations import quaternion_from_euler
import tf
from sensor_msgs.msg import Imu
import string
#=========Initial Parameter=============
D_RIGHT=1.200 #0.245462 # distance travelled per right-encoder pulse [m] - pulse ratio of the right wheel encoder
D_LEFT =1.198 #0.243768 # distance travelled per left-encoder pulse [m] (larger value -> more travel) - pulse ratio of the left wheel encoder
GR=1.0 #100.0   # first divisor applied to the raw pulse count - gear ratio
PR=100.0 #32.0  # second divisor applied to the raw pulse count - pulse ratio of encoders
TREAD=1.0 #0.25 #1.002405   # converts the left/right travel difference into an angle (smaller -> sharper turns) - tread
rs=[0,0]; # pulse store
u=array([0,0]);
count=0
odocount=0
encL = 0
encR = 0
encLold = 0
encRold = 0
angularOld = 0.0
imuYawDiff = 0.0
imuYawOld = 0.0
imuYaw = 0.0
imuPitch = 0.0
imuRoll = 0.0
# tfにも出力
bc = tf.TransformBroadcaster()
listener = tf.TransformListener()
#result=open('odo.txt','w')
def imu_callback(data):
global imuYawDiff
global imuYawOld
global imuYaw
e = tf.transformations.euler_from_quaternion((data.orientation.x,data.orientation.y,data.orientation.z,data.orientation.w))
imuYaw = e[2]
#imuYawDiff += (e[2] - imuYawOld)
#imuYawOld = e[2]
def imuserial_callback(data):
global imuYawDiff
global imuYaw
global imuPitch
global imuRoll
global imuYawOld
rospy.loginfo(data)
words = string.split(data.data,",") # Fields split
yaw = -float(words[9]) - 180.0
yaw = PItoPI(yaw*pi/180.0)
imuYaw = yaw
imuYawDiff += PItoPI(yaw - imuYawOld)
imuYawOld = yaw
pitch = float(words[7])
pitch = PItoPI(pitch*pi/180.0)
imuPitch = pitch
roll = float(words[8])
roll = PItoPI(roll*pi/180.0)
imuRoll = roll
#def cmdvel_callback(data):
# global angularOld
# angularOld = data.angular.z
def talker():
rospy.init_node('infant_odometry', anonymous=True)
#rospy.Subscriber("/cmd_vel", Twist, cmdvel_callback)
rospy.Subscriber("/benz/raspi/encR", Int64, odom_callbackR)
rospy.Subscriber("/benz/raspi/encL", Int64, odom_callbackL)
rospy.Subscriber("/imu", Imu, imu_callback)
#rospy.Subscriber("/imu/serial", String, imuserial_callback)
pub = rospy.Publisher('/odom', Odometry, queue_size=10 )
odo=Odometry()
odo.header.frame_id='odom'
odo.child_frame_id='base_link'
current_time=time.time()
last_time=time.time()
#======= Parameter Setting with Parameter Server=======
# * If the parameter server didn't work, the following sequence would be passed.
if rospy.has_param('infant_learning_odometry/right_wheel_p'):
global D_RIGHT
D_RIGHT=rospy.get_param('infant_learning_odometry/right_wheel_p')
if rospy.has_param('infant_learning_odometry/left_wheel_p'):
global D_LEFT
        D_LEFT=rospy.get_param('infant_learning_odometry/left_wheel_p')
if rospy.has_param('infant_learning_odometry/Tread'):
global TREAD
TREAD=rospy.get_param('infant_learning_odometry/Tread')
#print PRIGHT
#print PLEFT
#print TREAD
#===========================================================
odo.pose.pose.position.x=0
odo.pose.pose.orientation.w=0
odo.twist.twist.linear.x=0
odo.twist.twist.angular.z=0
r = rospy.Rate(10)
global dt
global x
global count
global imuYaw
global imuPitch
x=array([0,0,toRadian(0.0)]);
dt=0.1
t=0;
while not rospy.is_shutdown():
count=count+1
current_time=time.time()
dt=current_time-last_time
odo.header.seq=count
odo.header.stamp = rospy.Time.now()
odo.pose.pose.position.x=x[0]
odo.pose.pose.position.y=x[1]
roll=imuRoll
pitch=imuPitch
yaw = x[2]
q = quaternion_from_euler(roll,pitch,yaw)
odo.pose.pose.orientation.x = q[0]
odo.pose.pose.orientation.y = q[1]
odo.pose.pose.orientation.z = q[2]
odo.pose.pose.orientation.w = q[3]
odo.twist.twist.linear.x=u[0]
odo.twist.twist.angular.z=u[1]
pub.publish(odo)
# odom tf
if bc is not None:
bc.sendTransform((x[0],x[1],0.0),q,rospy.Time.now(),'base_link','odom')
if count%2000==0:
print "%8.2f" %x[0],"%8.2f" %x[1],"%8.2f" %toDegree(x[2])
#print (last_time-current_time)
#print t
#result.write('%f,%f,%f,%f,%f\n' %(x[0],x[1],x[2],u[0],u[1]))
last_time=time.time()
t+=dt
r.sleep()
rospy.spin()
# u[0] move vec
# u[1] dir
def calc_input(rs):
global imuYawDiff
global imuYaw
u=array([0,0]);
vr=rs[0]/(GR*PR)*pi*D_RIGHT
vl=rs[1]/(GR*PR)*pi*D_LEFT
v=(vr+vl)/2.0
#yawrate=(vr-vl)/(TREAD/2.0)
#u=array([v,yawrate])
#u=array([v,imuYawDiff*9.50])
#u=array([v,imuYaw])
u=array([v,0.0])
imuYawDiff = 0.0
return u
# x[0] x
# x[1] y
# x[2] dir
def MotionModel(x,u,dt):
F=eye(3)
#B=array([[dt*cos(x[2]),0],
# [dt*sin(x[2]),0],
# [0,dt]])
B=array([[dt*cos(imuYaw),0],
[dt*sin(imuYaw),0],
[0,dt]])
x=dot(F,x)+dot(B,u)
x[2]=imuYaw #PItoPI(x[2])
return x
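# The update above is a plain dead-reckoning step, with the heading taken from the
# IMU (imuYaw) instead of the integrated state x[2]:
#   x_{k+1}     = x_k + v * dt * cos(yaw)
#   y_{k+1}     = y_k + v * dt * sin(yaw)
#   theta_{k+1} = imuYaw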
# Pi Limit
def PItoPI(angle):
while angle>=pi:
angle=angle-2*pi
while angle<=-pi:
angle=angle+2*pi
return angle
def odom_callback(data):
global u
global x
global dt
global odocount
rs=[data.pose.pose.position.x,data.pose.pose.position.y]
odocount=data.header.seq
u=calc_input(rs)
x=MotionModel(x,u,dt)
# --------------------------
def odom_callbackR(data):
global encR
if abs(data.data) < 1000:
encR = data.data
def odom_callbackL(data):
global u
global x
global dt
#global odocount
global encL
global encLold
global encRold
global listener
if abs(data.data) < 1000:
encL = data.data
rs=[float(encR),float(encL)]
encRold = encR
encLold = encL
#odocount=data.header.seq
u=calc_input(rs)
x=MotionModel(x,u,dt)
def toDegree(angle):
angle=angle*180.0/pi
return angle
def toRadian(angle):
angle=angle*pi/180.0
return angle
if __name__ == '__main__':
talker()
| [
"[email protected]"
] | |
0840c9f42cbaad01720dc6260a1278c6e4d69ae1 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/nilearn/2017/8/conf.py | 518d965d16cfb73d3a77f680140733488b524154 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 10,384 | py | # -*- coding: utf-8 -*-
#
# nistats documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
import sphinx_gallery
# We also add the directory just above to enable local imports of nistats
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
('sphinx.ext.imgmath' # only available for sphinx >= 1.4
if sphinx.version_info[:2] >= (1, 4)
else 'sphinx.ext.pngmath'),
'sphinx.ext.intersphinx',
'numpydoc.numpydoc',
'sphinx_gallery.gen_gallery',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nistats'
copyright = u'The nistats developers 2010-2016'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
import nistats
release = nistats.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
exclude_patterns = ['tune_toc.rst',
'includes/big_toc_css.rst',
'includes/bigger_toc_css.rst',
]
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nistats'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'nature.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'oldversion':False, 'collapsiblesidebar': False}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "functional MRI for NeuroImaging"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Nistats'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/nistats-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonScientic'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'nistats.tex', u'functional MRI in python',
ur"""Bertrand Thirion"""
+ r"\\\relax ~\\\relax http://nistats.github.io",
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/nistats-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\let\oldfootnote\footnote
\def\footnote#1{\oldfootnote{\small #1}}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
latex_elements = {
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}',
# Get completely rid of index
'printindex': '',
}
# If false, no module index is generated.
latex_use_modindex = False
latex_domain_indices = False
# Show the page numbers in the references
latex_show_pagerefs = True
# Show URLs in footnotes
latex_show_urls = 'footnote'
trim_doctests_flags = True
_python_doc_base = 'http://docs.python.org/2.7'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
_python_doc_base: None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://matplotlib.org/': None,
'http://scikit-learn.org/stable': None,
'http://nipy.org/nibabel': None,
# add line for nilearn
# add line for patsy
#'http://scikit-image.org/docs/0.8.0/': None,
#'http://docs.enthought.com/mayavi/mayavi/': None,
#'http://statsmodels.sourceforge.net/': None,
#'http://pandas.pydata.org': None,
}
extlinks = {
'simple': (_python_doc_base + '/reference/simple_stmts.html#%s', ''),
'compound': (_python_doc_base + '/reference/compound_stmts.html#%s', ''),
}
sphinx_gallery_conf = {
'doc_module' : 'nistats',
'reference_url' : {
'nilearn': 'http://nilearn.github.io/index.html',
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel',
'sklearn': 'http://scikit-learn.org/stable',
'patsy': 'http://patsy.readthedocs.io/en/latest/',
'pandas': 'http://pandas.pydata.org/pandas-docs/stable/'}
}
# Get rid of spurious warnings due to some interaction between
# autosummary and numpydoc. See
# https://github.com/phn/pytpm/issues/3#issuecomment-12133978 for more
# details
numpydoc_show_class_members = False
def touch_example_backreferences(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples
def setup(app):
app.add_javascript('copybutton.js')
app.connect('autodoc-process-docstring', touch_example_backreferences)
| [
"[email protected]"
] | |
dfacabd6e8e05e277bf6e7d67608128c52ab31b9 | 6b984499993b757d094afb773c74bce48629cc8c | /sito/payrol/views.py | 8c16313b245eb41d2fcc5d122cb9c2e23ee8ce8a | [] | no_license | joemash/django-ansible-deploy | 9e5f825e24144f71d1f388f156fbf389bba2c699 | c04f9e3f1ef20620571b3242bbdbf4bf4bd21597 | refs/heads/master | 2020-03-18T16:52:22.006457 | 2019-01-10T21:30:34 | 2019-01-10T21:30:34 | 134,990,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | from django.shortcuts import render
from django.views.generic import TemplateView
from django.http import HttpResponseRedirect,HttpResponse,JsonResponse
class BaseView(TemplateView):
template_name = 'payrol.html'
def post(self,request,*args,**kwargs):
myitems = []
import locale
locale.setlocale(locale.LC_ALL, '')
if request.is_ajax():
salary = float(request.POST.get("salary"))
net_salary = self.calculate_net_salary(salary)
net_paye = self.calculate_net_paye(salary)
nhif = self.calculate_nhif(salary)
#convert the json string to a python object
#json1_data = json.loads(line_items)
data = {
'url':'http://127.0.0.1:8000/',
'salary':net_salary,
'net_paye':net_paye,
'nhif':nhif,
}
return JsonResponse(data)
return super(BaseView,self).get(request)
def calculate_gross_paye(self,gross_pay):
gross_paye = 0.0
nssf = 200
taxable_pay = gross_pay - nssf
if taxable_pay >= 11181 and taxable_pay <= 21713:
gross_paye = 1118.03 + (taxable_pay - 11180) * 0.15
elif taxable_pay >= 21714 and taxable_pay <= 32247:
gross_paye = 2698.07 + (taxable_pay - 21713) * 0.2
elif taxable_pay >= 32248 and taxable_pay <= 42780:
gross_paye = 4804.79 + (taxable_pay - 32247) * 0.25
elif taxable_pay >= 42781:
gross_paye = 7438.18 + (taxable_pay - 42781) * 0.3
return round(gross_paye,1)
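    # Worked example (illustrative figures only): for gross_pay = 30000 the taxable pay
    # is 30000 - 200 = 29800, which falls in the 21714-32247 band, so
    # gross_paye = 2698.07 + (29800 - 21713) * 0.2 = 4315.47 (4315.5 after rounding);
    # subtracting the personal relief of 1280 then gives a net PAYE of about 3035.5.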
def calculate_net_paye(self,gross_pay):
net_paye = 0.0
personal_relief = 1280
if gross_pay > 0:
gross_paye = self.calculate_gross_paye(gross_pay)
if gross_paye > 0:
net_paye = gross_paye - personal_relief
return net_paye
def calculate_nhif(self,salary):
nhif = 0.0
if salary >= 1000 and salary <= 5999:
nhif = 150
elif salary >= 6000 and salary <= 7999:
nhif = 300
elif salary >= 8000 and salary <= 11999:
nhif = 400
elif salary >= 12000 and salary <= 14999:
nhif = 500
elif salary >= 15000 and salary <= 19999:
nhif = 600
elif salary >= 20000 and salary <= 24999:
nhif = 750
elif salary >= 25000 and salary <= 29999:
nhif = 850
elif salary>=30000 and salary <= 34999:
nhif = 900
elif salary >= 35000 and salary <= 39999:
nhif = 950
elif salary>= 40000 and salary <= 44999:
nhif = 1000
elif salary >= 45000 and salary <= 49999:
nhif = 1100
elif salary >= 50000 and salary <= 59999:
nhif = 1200
elif salary >= 60000 and salary <= 69999:
nhif = 1300
elif salary >= 70000 and salary <= 79999:
nhif = 1400
elif salary >= 80000 and salary <= 89999:
nhif = 1500
elif salary >= 90000 and salary <= 99999:
nhif = 1600
elif salary >= 100000:
nhif = 1700
return nhif
def calculate_net_salary(self,gross_pay):
net_pay = 0.0
nssf = 200
if type(gross_pay) != str:
net_pay = gross_pay - nssf
if net_pay > 0:
net_pay -= self.calculate_net_paye(gross_pay) + self.calculate_nhif(gross_pay)
else:
return gross_pay
else:
net_pay = 0.0
return round(net_pay,0)
| [
"[email protected]"
] | |
f852a218085e5ed1cd1d88039a328daaf2000cb3 | 3aca185c65285b4dd37d7fd5b82c225a61d6af6b | /helper/__init__.py | 712e4717a9aa95c1eb7f30697218847a673fca16 | [
"MIT"
] | permissive | m2u/m2u | 04947236d269619ed33b8ea5730ee2ce76307820 | 121a9cded0ff10d10a438c8510ece7f6b0cb6176 | refs/heads/develop | 2021-01-02T08:14:23.231967 | 2017-05-19T19:05:07 | 2017-05-19T19:05:07 | 28,252,425 | 31 | 20 | null | 2017-09-08T07:10:04 | 2014-12-20T01:02:00 | Python | UTF-8 | Python | false | false | 48 | py | from systemhelper import *
from helper import *
| [
"[email protected]"
] | |
83f3c70c6a5fc372506556fa465a2440a9cd0dde | a84acc182863362951b3b5405732c822f92e1d48 | /navier-stokes/data/16x16/t1/ns.py | c03786902725528cf42b877cef49b8eb644c608f | [] | no_license | mulligatawny/me408-final | 86edd7bc82a928bbca6dce3ec9365219b0c2f5f7 | ffbd01c4a0d849ad186c2ade87716d01fb54668f | refs/heads/main | 2023-03-23T07:30:00.253904 | 2021-03-19T23:24:11 | 2021-03-19T23:24:11 | 349,128,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py | ###############################################################################
# 2D Incompressible Navier-Stokes Solver with Periodic Boundaries, Galerkins' #
# Method and Adams-Bashforth Time Integration #
###############################################################################
import numpy as np
import matplotlib.pyplot as plt
from subroutines import compute_fk
from subroutines import compute_vorticity
from subroutines import compute_pressure
from matplotlib import cm
import pdb
def fu(uk, vk):
fku = compute_fk.compute_fk(N, n1, n2, uk, vk, dim=0)
fkv = compute_fk.compute_fk(N, n1, n2, uk, vk, dim=1)
a = np.zeros_like(uk)
d = np.zeros_like(uk)
for i in range(N):
for j in range(N):
# singular k**2
if n1[i] == n2[j] == 0:
a[i,j] = 0
d[i,j] = 0
else:
a[i,j] = (n1[i]*fku[i,j] + n2[j]*fkv[i,j])*\
n1[i]/(n1[i]**2 + n2[j]**2)
d[i,j] = nu*(n1[i]**2 + n2[j]**2)*uk[i,j]
return fku -a -d
def fv(uk, vk):
fku = compute_fk.compute_fk(N, n1, n2, uk, vk, dim=0)
fkv = compute_fk.compute_fk(N, n1, n2, uk, vk, dim=1)
a = np.zeros_like(vk)
d = np.zeros_like(vk)
for i in range(N):
for j in range(N):
if n1[i] == n2[j] == 0:
a[i,j] = 0
d[i,j] = 0
else:
a[i,j] = (n1[i]*fku[i,j] + n2[j]*fkv[i,j]\
)*n2[j]/(n1[i]**2 + n2[j]**2)
d[i,j] = nu*(n1[i]**2 + n2[j]**2)*vk[i,j]
return fkv -a -d
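# For each wavenumber pair (k1, k2) with k^2 = k1^2 + k2^2 > 0, fu/fv above evaluate
# the projected momentum equation in spectral space:
#   d(u_k)/dt = Fu_k - k1 * (k1*Fu_k + k2*Fv_k) / k^2 - nu * k^2 * u_k
# i.e. the convective term minus its component along k (which stands in for the
# pressure gradient and keeps the velocity field divergence-free) minus viscous
# damping; the k = 0 mode is skipped to avoid dividing by zero.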
N = 16
L = 2*np.pi
nu = 1
# mesh
x = np.linspace(-L/2, L/2, N+1)[:-1]
y = np.linspace(-L/2, L/2, N+1)[:-1]
X, Y = np.meshgrid(x, y)
# wavenumbers
n1 = np.arange(-N/2, N/2)*(2*np.pi/L)
n2 = np.arange(-N/2, N/2)*(2*np.pi/L)
# initialize
u0 = 0.5*(np.sin(Y+X) +np.sin(Y-X))
v0 = -0.5*(np.sin(X+Y) +np.sin(X-Y))
t = 0.0
tf = 1
dt = 1e-3
nt = int(tf/dt+1)
e = np.zeros(nt)
count = 0
# transform I.C.
uk = np.fft.fftshift(np.fft.fft2(u0))/N**2
vk = np.fft.fftshift(np.fft.fft2(v0))/N**2
# allocate storage for (n+1) and (n-1)th timestep
uknp1 = np.zeros_like(uk)
vknp1 = np.zeros_like(vk)
uknm1 = np.zeros_like(uk)
vknm1 = np.zeros_like(vk)
# first timestep with forward Euler
uknp1 = uk + dt*fu(uk, vk)
vknp1 = vk + dt*fv(uk, vk)
uknm1 = uk
vknm1 = vk
uk = uknp1
vk = vknp1
t = t + dt
# time integrate using Adams-Bashforth
while t < tf:
uknp1 = uk + (dt/2)*(3*fu(uk, vk) - fu(uknm1, vknm1))
vknp1 = vk + (dt/2)*(3*fv(uk, vk) - fv(uknm1, vknm1))
uknm1 = uk
vknm1 = vk
uk = uknp1
vk = vknp1
u = np.real(np.fft.ifft2(np.fft.ifftshift(uk))*(N**2))
v = np.real(np.fft.ifft2(np.fft.ifftshift(vk))*(N**2))
e[count] = (np.mean(u**2) + np.mean(v**2))/2
count = count + 1
t = t + dt
u = np.real(np.fft.ifft2(np.fft.ifftshift(uk))*(N**2))
v = np.real(np.fft.ifft2(np.fft.ifftshift(vk))*(N**2))
w = compute_vorticity.compute_vorticity(N, n1, n2, uk, vk)
p = compute_pressure.compute_pressure(N, n1, n2, uk, vk)
np.save('t.npy', t)
np.save('x.npy', X)
np.save('y.npy', Y)
np.save('u.npy', u)
np.save('v.npy', v)
np.save('w.npy', w)
np.save('p.npy', p)
np.save('e.npy', e)
| [
"[email protected]"
] | |
7143e094b37877a6546a35c155447afe22e4ac98 | ee50c362e090b03276fcda9509632df3f2088292 | /easyirctests/test_connection.py | a65059446ddf4999e1c824fc2cc01ccbbe7605c8 | [
"BSD-2-Clause-Views"
] | permissive | youknowone/easyirc | a208eacaeefb29eca8e83e9691f9ec7f5f136454 | 294630acb4ec931b0deff342faf1904120200650 | refs/heads/master | 2021-01-01T15:44:31.606522 | 2017-12-29T14:59:58 | 2017-12-29T14:59:58 | 9,749,876 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py |
# -*- coding: utf-8 -*-
import time
import pytest
from easyirc.connection import DispatchConnection, CallbackConnection, EventHookConnection
from easyirc.command import protocol
from easyirc.event import EventManager
from easyirc.const import *
from easyirc import util
from mocksocket import MockSocket
from test_socket import socktypes, test_create
import settings
connop = settings.TEST_CONNECTION
@pytest.mark.parametrize(['SocketType'], socktypes)
def test_dispatch(SocketType):
print SocketType
connection = DispatchConnection(None, protocol.manager)
msg = connection.dispatch()
assert msg == CREATED
connection.socket = test_create(SocketType)
connection.connect()
connection.start()
msg = connection.dispatch()
assert msg == CONNECTED
def check_msg(themsg):
while True:
time.sleep(0.1)
msg = connection.dispatch()
if msg is None: continue
parts = util.msgsplit(msg)
if parts.type == PING:
connection.pong(parts[1])
continue
if parts.type == themsg:
break
else:
print msg
connection.nick(connop['nick'])
connection.user(connop['nick'], 'Bot by EasyIRC')
check_msg('375')
connection.join(connop['autojoins'][0])
check_msg(JOIN)
connection.part(connop['autojoins'][0])
check_msg(PART)
connection.quit(u'QUIT MESSAGE')
check_msg('ERROR')
connection.disconnect()
connection.thread.join()
@pytest.mark.parametrize(['SocketType'], socktypes)
def test_callback(SocketType):
print SocketType
def callback(connection, m):
ps = util.msgsplit(m)
chan = connop['autojoins'][0]
if m == CREATED:
connection.socket = test_create(SocketType)
connection.connect()
elif m == CONNECTED:
connection.nick(connop['nick'])
connection.user(connop['nick'], 'Bot by EasyIRC')
elif ps.type == PING:
connection.pong(ps[1])
elif ps.type == '375':
connection.join(chan)
elif ps.type == JOIN:
connection.privmsg(chan, u'test the 콜백')
connection.quit(u'전 이만 갑니다')
elif ps.type == 'ERROR':
print 'END!'
connection.disconnect()
else:
print m
connection = CallbackConnection(callback, protocol.manager)
connection.start()
connection.thread.join()
event = EventManager()
chan = connop['autojoins'][0]
@event.hookmsg(CREATED)
def created(connection, sender):
print 'created?', connection
connection.socket = event.socket
connection.connect()
@event.hookmsg(CONNECTED)
def connected(connection, sender):
print 'connected?', connection
connection.nick(connop['nick'])
connection.user(connop['nick'], 'Bot by EasyIRC')
@event.hookmsg(PING)
def ping(connection, sender, tag):
print 'ping? pong!', connection
connection.pong(tag)
@event.hookmsg('375')
def msgofday(connection, sender, *args):
print 'message of the day!', connection
connection.join(chan)
@event.hookmsg(JOIN)
def join(connection, sender, *args):
print 'joined?', connection
connection.privmsg(chan, u'test the 이벤트훅')
connection.quit(u'전 이만 갑니다')
@event.hookmsg('ERROR')
def error(connection, sender, *args):
print 'error?!', connection
connection.disconnect()
@pytest.mark.parametrize(['SocketType'], socktypes)
def test_dispatch_event(SocketType):
event.socket = test_create(SocketType)
connection = DispatchConnection(event, protocol.manager)
connection.start()
while connection.thread.is_alive():
connection.handle_message()
connection.thread.join()
@pytest.mark.parametrize(['SocketType'], socktypes)
def test_eventhook(SocketType):
event.socket = test_create(SocketType)
connection = EventHookConnection(event, protocol.manager)
connection.start()
connection.thread.join()
if __name__ == '__main__':
test_dispatch(socktypes[0][0])
#test_dispatch(socktypes[1][0])
test_callback(socktypes[0][0])
#test_callback(socktypes[1][0])
test_eventhook(socktypes[0][0])
#test_eventhook(socktypes[1][0])
| [
"[email protected]"
] | |
bd5682cfb8e9da71bcfca361cb7399d2afc94949 | c70e49ec20fbae95e34321d01381859b08e48397 | /12/script2.py | b5c72b706882867b790ec86edbc68cd71c569db1 | [] | no_license | benx45h/advent-2020 | 4c915f1d00c589244ce83ab212790b888d3874a7 | a12f0b8dbf5041131a693d8202585353f88a6e07 | refs/heads/master | 2023-03-31T19:07:32.046285 | 2021-04-02T19:39:38 | 2021-04-02T19:39:38 | 319,415,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | ifile = open('input.txt','r')
lines = ifile.readlines()
lines = [line.strip() for line in lines]
wpos = [10,1]
dpos = [0,0]
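# Part 2 rules implemented below: the waypoint (wpos) starts 10 east / 1 north of the
# ship, N/S/E/W shift the waypoint, F moves the ship (dpos) toward the waypoint num
# times, and R/L rotate the waypoint about the ship in 90-degree steps
# (clockwise: (x, y) -> (y, -x); counter-clockwise: (x, y) -> (-y, x)).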
for line in lines:
cmd = line[0]
num = int(line[1:])
if(cmd == 'F'):
dpos[0] = dpos[0] + num * wpos[0]
dpos[1] = dpos[1] + num * wpos[1]
if(cmd == 'N'):
wpos[1] = wpos[1] + num
if(cmd == 'S'):
wpos[1] = wpos[1] - num
if(cmd == 'E'):
wpos[0] = wpos[0] + num
if(cmd == 'W'):
wpos[0] = wpos[0] - num
if(cmd == 'R'):
for i in range(int(num/90)):
x = wpos[0]
wpos[0] = wpos[1]
wpos[1] = -x
if(cmd == 'L'):
for i in range(int(num/90)):
x = wpos[0]
wpos[0] = -wpos[1]
wpos[1] = x
man = abs(dpos[0])+abs(dpos[1])
print(man)
| [
"[email protected]"
] | |
10d022c28ea04f0d29c2b03b00f591c964714d6c | 1b7da2f11e509828476e21ca665279602de7b509 | /config/urls.py | 67cbc34adf85f12a64480899e9d4644511448717 | [] | no_license | rosenene/oap | 922f3955e4f3a583e6829eed0d518f2c7f806d58 | 32598b7d6c9d6677c889258f21752878ad30d0a5 | refs/heads/master | 2022-04-26T20:31:37.850145 | 2020-04-16T07:47:19 | 2020-04-16T07:47:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
# from django.conf.urls import url
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from portal.cv_registration import urls as cv_reg_urls
from portal.ega_org import urls as ega_reg_urls
from portal.university_regulator import urls as university_regulator_urls
# from django.contrib.auth.views import LoginView
# from portal.users.views import login_spana
# from portal.users import views
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
# path("", include(views.log), name="login_spana"),
# url("", login_spana, name="l"),
# path("", LoginView.as_view(
# template_name='account/login.html')),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("portal.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
path("info_registration/", include(cv_reg_urls)),
path('university/', include(university_regulator_urls)),
path('ega/', include(ega_reg_urls)),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| [
"[email protected]"
] | |
d1f2ea3914e215f6474197ddc8d3171da4a83c96 | fd7ae9a979302bf868f681a16215d425cb62afe2 | /config2.py | ad060d9fb824bac5d0d5d6148d386788c507f409 | [] | no_license | b0olean/bot-discord | 2e5a9980375db367eee3c62cc6ab4c816188a88f | 604f8da5ac83217beec3af17c7d7b58fed511ee7 | refs/heads/master | 2023-02-19T05:12:58.456059 | 2021-01-20T15:05:07 | 2021-01-20T15:05:07 | 323,875,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | TOKEN2 = 'тут может быть ваш токен'
| [
"[email protected]"
] | |
4a276c24541e49c8fd21b853d4155a98f2eb287a | f6ffcb2111c677916525fb25554ef5b7d2c9cbbe | /blackjax/inference/hmc/trajectory.py | 81f75b3d97167ee2e57ed28debe907fd3e765112 | [
"Apache-2.0"
] | permissive | prashjet/blackjax | 748b4b91c279217af7870ec18516510a3d0799e4 | fe10807d04c8d1c2dcdd51e0db0ba54b4dceb88e | refs/heads/main | 2023-08-11T20:17:40.395768 | 2021-08-05T20:06:22 | 2021-08-05T20:06:22 | 399,069,946 | 0 | 0 | Apache-2.0 | 2021-08-23T11:02:01 | 2021-08-23T11:02:00 | null | UTF-8 | Python | false | false | 22,197 | py | """Procedures to build trajectories for algorithms in the HMC family.
To propose a new state, algorithms in the HMC family generally proceed by [1]_:
1. Sampling a trajectory starting from the initial point;
2. Sampling a new state from this sampled trajectory.
Step (1) ensures that the process is reversible and thus that detailed balance
is respected. The traditional implementation of HMC does not sample a
trajectory, but instead takes a fixed number of steps in the same direction and
flips the momentum of the last state.
We distinguish here between two different methods to sample trajectories: static
and dynamic sampling. In the static setting we sample trajectories with a fixed
number of steps, while in the dynamic setting the total number of steps is
determined by a dynamic termination criterion. Traditional HMC falls in the
former category, NUTS in the latter.
There are also two methods to sample proposals from these trajectories. In the
static setting we first build the trajectory and then sample a proposal from
this trajectory. In the progressive setting we update the proposal as the
trajectory is being sampled. While the former is faster, we risk saturating the
memory by keeping states that will subsequently be discarded.
References
----------
.. [1]: Betancourt, Michael. "A conceptual introduction to Hamiltonian Monte Carlo." arXiv preprint arXiv:1701.02434 (2017).
"""
from typing import Callable, NamedTuple, Tuple
import jax
import jax.numpy as jnp
from blackjax.inference.hmc.integrators import IntegratorState
from blackjax.inference.hmc.proposal import (
Proposal,
progressive_biased_sampling,
progressive_uniform_sampling,
proposal_generator,
)
from blackjax.types import PRNGKey, PyTree
class Trajectory(NamedTuple):
leftmost_state: IntegratorState
rightmost_state: IntegratorState
momentum_sum: PyTree
num_states: int
def append_to_trajectory(trajectory: Trajectory, state: IntegratorState) -> Trajectory:
"""Append a state to the (right of the) trajectory to form a new trajectory."""
momentum_sum = jax.tree_util.tree_multimap(
jnp.add, trajectory.momentum_sum, state.momentum
)
return Trajectory(
trajectory.leftmost_state, state, momentum_sum, trajectory.num_states + 1
)
def reorder_trajectories(
direction: int, trajectory: Trajectory, new_trajectory: Trajectory
) -> Tuple[Trajectory, Trajectory]:
"""Order the two trajectories depending on the direction."""
return jax.lax.cond(
direction > 0,
lambda _: (
trajectory,
new_trajectory,
),
lambda _: (
new_trajectory,
trajectory,
),
operand=None,
)
def merge_trajectories(left_trajectory: Trajectory, right_trajectory: Trajectory):
momentum_sum = jax.tree_util.tree_multimap(
jnp.add, left_trajectory.momentum_sum, right_trajectory.momentum_sum
)
return Trajectory(
left_trajectory.leftmost_state,
right_trajectory.rightmost_state,
momentum_sum,
left_trajectory.num_states + right_trajectory.num_states,
)
# -------------------------------------------------------------------
# Integration
#
# Generating samples by choosing a direction and running the integrator
# several times along this direction. Distinct from sampling.
# -------------------------------------------------------------------
def static_integration(
integrator: Callable,
step_size: float,
num_integration_steps: int,
direction: int = 1,
) -> Callable:
"""Generate a trajectory by integrating several times in one direction."""
directed_step_size = direction * step_size
def integrate(initial_state: IntegratorState) -> IntegratorState:
def one_step(state, _):
state = integrator(state, directed_step_size)
return state, state
last_state, _ = jax.lax.scan(
one_step, initial_state, jnp.arange(num_integration_steps)
)
return last_state
return integrate
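# Rough usage sketch (illustrative only; the integrator constructors live in
# blackjax.inference.hmc.integrators and their exact signatures may differ):
#
#   integrator = velocity_verlet(potential_fn, kinetic_energy_fn)
#   integrate = static_integration(integrator, step_size=1e-3, num_integration_steps=10)
#   last_state = integrate(initial_integrator_state)   # an IntegratorState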
class DynamicIntegrationState(NamedTuple):
step: int
proposal: Proposal
trajectory: Trajectory
termination_state: NamedTuple
def dynamic_progressive_integration(
integrator: Callable,
kinetic_energy: Callable,
update_termination_state: Callable,
is_criterion_met: Callable,
divergence_threshold: float,
):
"""Integrate a trajectory and update the proposal sequentially in one direction
until the termination criterion is met.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
update_termination_state
Updates the state of the termination mechanism.
is_criterion_met
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we say a transition is divergent.
"""
_, generate_proposal = proposal_generator(kinetic_energy, divergence_threshold)
sample_proposal = progressive_uniform_sampling
def integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
termination_state,
max_num_steps: int,
step_size,
initial_energy,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal sequentially until the termination criterion is met.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
termination_state
The state that keeps track of the information needed for the termination criterion.
max_num_steps
The maximum number of integration steps. The expansion will stop
when this number is reached if the termination criterion has not
been met.
step_size
The step size of the symplectic integrator.
initial_energy
            Initial energy H0 of the HMC step (not to be confused with the initial energy of the subtree)
"""
def do_keep_integrating(loop_state):
"""Decide whether we should continue integrating the trajectory"""
_, integration_state, (is_diverging, has_terminated) = loop_state
return (
(integration_state.step < max_num_steps)
& ~has_terminated
& ~is_diverging
)
def add_one_state(loop_state):
rng_key, integration_state, _ = loop_state
step, proposal, trajectory, termination_state = integration_state
rng_key, proposal_key = jax.random.split(rng_key)
new_state = integrator(trajectory.rightmost_state, direction * step_size)
new_proposal, is_diverging = generate_proposal(initial_energy, new_state)
new_trajectory = append_to_trajectory(trajectory, new_state)
sampled_proposal = sample_proposal(proposal_key, proposal, new_proposal)
new_termination_state = update_termination_state(
termination_state, new_trajectory.momentum_sum, new_state.momentum, step
)
has_terminated = is_criterion_met(
new_termination_state, new_trajectory.momentum_sum, new_state.momentum
)
new_integration_state = DynamicIntegrationState(
step + 1,
sampled_proposal,
new_trajectory,
new_termination_state,
)
return (rng_key, new_integration_state, (is_diverging, has_terminated))
# Take the first step (step 0) that starts the new trajectory with proposal,
        # so that for step k > 0 in the while loop we can just append the new
# state to the trajectory and generate new proposal.
state_step0 = integrator(initial_state, direction * step_size)
initial_proposal, is_diverging = generate_proposal(initial_energy, state_step0)
trajectory0 = Trajectory(state_step0, state_step0, state_step0.momentum, 1)
termination_state0 = update_termination_state(
termination_state, trajectory0.momentum_sum, state_step0.momentum, 0
)
has_terminated = is_criterion_met(
termination_state0, trajectory0.momentum_sum, state_step0.momentum
)
initial_integration_state = DynamicIntegrationState(
1,
initial_proposal,
trajectory0,
termination_state0,
)
_, integration_state, (is_diverging, has_terminated) = jax.lax.while_loop(
do_keep_integrating,
add_one_state,
(rng_key, initial_integration_state, (is_diverging, has_terminated)),
)
step, proposal, trajectory, termination_state = integration_state
# In the while_loop we always extend on the right most direction.
new_trajectory = jax.lax.cond(
direction > 0,
lambda _: trajectory,
lambda _: Trajectory(
trajectory.rightmost_state,
trajectory.leftmost_state,
trajectory.momentum_sum,
trajectory.num_states,
),
operand=None,
)
return (
proposal,
new_trajectory,
termination_state,
is_diverging,
has_terminated,
)
return integrate
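# Note: `dynamic_progressive_integration` is the iterative, jit-compatible
# counterpart of the recursive tree building below; the recursive version is
# kept mainly as a reference to validate this one against (see its docstring).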
def dynamic_recursive_integration(
integrator: Callable,
kinetic_energy: Callable,
uturn_check_fn: Callable,
divergence_threshold: float,
use_robust_uturn_check: bool = False,
):
"""Integrate a trajectory and update the proposal recursively in Python
until the termination criterion is met.
This is the implementation of Algorithm 6 from [1] with multinomial sampling.
    The implementation here is mostly for validating the progressive implementation
    to make sure the two are equivalent. The recursive implementation should not
    be used for actual sampling as it cannot be jitted and is thus likely to be slow.
Parameters
----------
integrator
The symplectic integrator used to integrate the hamiltonian trajectory.
kinetic_energy
Function to compute the current value of the kinetic energy.
uturn_check_fn
Determines whether the termination criterion has been met.
divergence_threshold
Value of the difference of energy between two consecutive states above which we say a transition is divergent.
use_robust_uturn_check
        Bool indicating whether to perform an additional U-turn check between two trajectories.
References
----------
.. [1]: Hoffman, Matthew D., and Andrew Gelman. "The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo." J. Mach. Learn. Res. 15.1 (2014): 1593-1623.
"""
_, generate_proposal = proposal_generator(kinetic_energy, divergence_threshold)
sample_proposal = progressive_uniform_sampling
def buildtree_integrate(
rng_key: PRNGKey,
initial_state: IntegratorState,
direction: int,
tree_depth: int,
step_size,
initial_energy: float,
):
"""Integrate the trajectory starting from `initial_state` and update
the proposal recursively with tree doubling until the termination criterion is met.
        The function `buildtree_integrate` calls itself for tree_depth > 0, thus invoking
        the recursive scheme that builds a trajectory by doubling a binary tree.
Parameters
----------
rng_key
Key used by JAX's random number generator.
initial_state
The initial state from which we start expanding the trajectory.
direction int in {-1, 1}
The direction in which to expand the trajectory.
tree_depth
The depth of the binary tree doubling.
step_size
The step size of the symplectic integrator.
initial_energy
            Initial energy H0 of the HMC step (not to be confused with the initial energy of the subtree)
"""
if tree_depth == 0:
# Base case - take one leapfrog step in the direction v.
next_state = integrator(initial_state, direction * step_size)
new_proposal, is_diverging = generate_proposal(initial_energy, next_state)
trajectory = Trajectory(next_state, next_state, next_state.momentum, 1)
return (
rng_key,
new_proposal,
trajectory,
is_diverging,
False,
)
else:
(
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
initial_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
            # Note that is_diverging and is_turning are updated in place
if ~is_diverging & ~is_turning:
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
rng_key,
new_proposal,
new_trajectory,
is_diverging,
is_turning,
) = buildtree_integrate(
rng_key,
start_state,
direction,
tree_depth - 1,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
trajectory = merge_trajectories(left_trajectory, right_trajectory)
if ~is_turning:
is_turning = uturn_check_fn(
trajectory.leftmost_state.momentum,
trajectory.rightmost_state.momentum,
trajectory.momentum_sum,
)
if use_robust_uturn_check & (tree_depth - 1 > 0):
momentum_sum_left = jax.tree_util.tree_multimap(
jnp.add,
left_trajectory.momentum_sum,
right_trajectory.leftmost_state.momentum,
)
is_turning_left = uturn_check_fn(
left_trajectory.leftmost_state.momentum,
right_trajectory.leftmost_state.momentum,
momentum_sum_left,
)
momentum_sum_right = jax.tree_util.tree_multimap(
jnp.add,
left_trajectory.rightmost_state.momentum,
right_trajectory.momentum_sum,
)
is_turning_right = uturn_check_fn(
left_trajectory.rightmost_state.momentum,
right_trajectory.rightmost_state.momentum,
momentum_sum_right,
)
is_turning = is_turning | is_turning_left | is_turning_right
rng_key, proposal_key = jax.random.split(rng_key)
proposal = sample_proposal(proposal_key, proposal, new_proposal)
return (
rng_key,
proposal,
trajectory,
is_diverging,
is_turning,
)
return buildtree_integrate
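# Note on the recursion: a call with tree_depth = k bottoms out in 2**k base
# cases, i.e. it integrates 2**k leapfrog steps, which is the "tree doubling"
# behaviour referred to in the NUTS paper.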
# -------------------------------------------------------------------
# Sampling
#
# Sampling a trajectory by choosing a direction at random and integrating
# the trajectory in this direction. In the simplest case we perform one
# integration step, but can also perform several as is the case in the
# NUTS algorithm.
# -------------------------------------------------------------------
class DynamicExpansionState(NamedTuple):
step: int
proposal: Proposal
trajectory: Trajectory
termination_state: NamedTuple
def dynamic_multiplicative_expansion(
trajectory_integrator: Callable,
uturn_check_fn: Callable,
step_size: float,
max_num_expansions: int = 10,
rate: int = 2,
) -> Callable:
"""Sample a trajectory and update the proposal sequentially
until the termination criterion is met.
The trajectory is sampled with the following procedure:
1. Pick a direction at random;
2. Integrate `num_step` steps in this direction;
3. If the integration has stopped prematurely, do not update the proposal;
4. Else if the trajectory is performing a U-turn, return current proposal;
5. Else update proposal, `num_steps = num_steps ** rate` and repeat from (1).
Parameters
----------
trajectory_integrator
A function that runs the symplectic integrators and returns a new proposal
and the integrated trajectory.
uturn_check_fn
Function used to check the U-Turn criterion.
step_size
The step size used by the symplectic integrator.
max_num_expansions
The maximum number of trajectory expansions until the proposal is
returned.
rate
The rate of the geometrical expansion. Typically 2 in NUTS, this is why
the literature often refers to "tree doubling".
"""
proposal_sampler = progressive_biased_sampling
def expand(
rng_key: PRNGKey,
initial_expansion_state: DynamicExpansionState,
initial_energy: float,
):
def do_keep_expanding(loop_state) -> bool:
"""Determine whether we need to keep expanding the trajectory."""
_, expansion_state, (is_diverging, is_turning) = loop_state
return (
(expansion_state.step < max_num_expansions)
& ~is_diverging
& ~is_turning
)
def expand_once(loop_state):
"""Expand the current trajectory.
At each step we draw a direction at random, build a subtrajectory starting
from the leftmost or rightmost point of the current trajectory that is
twice as long as the current trajectory.
Once that is done, possibly update the current proposal with that of
the subtrajectory.
"""
# Q: Should this function be aware of all the elements that need to
# be passed downstream?
rng_key, expansion_state, _ = loop_state
step, proposal, trajectory, termination_state = expansion_state
rng_key, direction_key, trajectory_key, proposal_key = jax.random.split(
rng_key, 4
)
# create new subtrajectory that is twice as long as the current
# trajectory.
direction = jnp.where(jax.random.bernoulli(direction_key), 1, -1)
start_state = jax.lax.cond(
direction > 0,
lambda _: trajectory.rightmost_state,
lambda _: trajectory.leftmost_state,
operand=None,
)
(
new_proposal,
new_trajectory,
termination_state,
is_diverging,
is_turning_subtree,
) = trajectory_integrator(
trajectory_key,
start_state,
direction,
termination_state,
rate ** step,
step_size,
initial_energy,
)
left_trajectory, right_trajectory = reorder_trajectories(
direction, trajectory, new_trajectory
)
merged_trajectory = merge_trajectories(left_trajectory, right_trajectory)
# update the proposal
# we reject proposals coming from diverging or turning subtrajectories,
            # but accumulate the average acceptance probability across the entire trajectory
def update_sum_log_p_accept(inputs):
_, proposal, new_proposal = inputs
return Proposal(
proposal.state,
proposal.energy,
proposal.weight,
jnp.logaddexp(
proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
),
)
sampled_proposal = jax.lax.cond(
is_diverging | is_turning_subtree,
update_sum_log_p_accept,
lambda x: proposal_sampler(*x),
operand=(proposal_key, proposal, new_proposal),
)
is_turning = uturn_check_fn(
merged_trajectory.leftmost_state.momentum,
merged_trajectory.rightmost_state.momentum,
merged_trajectory.momentum_sum,
)
new_state = DynamicExpansionState(
step + 1, sampled_proposal, merged_trajectory, termination_state
)
info = (is_diverging, is_turning_subtree | is_turning)
return (rng_key, new_state, info)
_, expansion_state, (is_diverging, is_turning) = jax.lax.while_loop(
do_keep_expanding,
expand_once,
(rng_key, initial_expansion_state, (False, False)),
)
return expansion_state, (is_diverging, is_turning)
return expand
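# Sketch of the expansion geometry (assuming the caller starts from a
# single-state trajectory at step 0): at expansion step k the integrator is
# asked for rate**k new states, so with rate=2 the trajectory roughly doubles
# at every expansion (hence "tree doubling"), up to max_num_expansions.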
| [
"[email protected]"
] | |
59cc5665c38dbcef218d2ee55b0b38ceec76cb87 | eb93eae6d0dc4de84cb90ae0b87843551979e794 | /pytest/test_code/test_employee.py | e3844b67e10c834eb9ae1a36b5fe6ca3761805f4 | [] | no_license | mashilu/hello-python | d891f7a12bc4d2dbd6dacd509ba3d491046bb687 | e5c45a6a4ebc7694c46243790000a7433a68ebf9 | refs/heads/master | 2020-04-28T11:10:51.407808 | 2018-07-20T09:10:55 | 2018-07-20T09:10:55 | 30,918,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
import unittest
from employee import Employee
class TestEmployee(unittest.TestCase):
def setUp(self):
self.employee = Employee('Ma', 'Shilu', 10000)
print('setUp')
def test_give_default_raise(self):
self.employee.give_raise()
self.assertEqual(self.employee.salary, 15000)
print('test_give_default_raise')
def test_give_custom_raise(self):
self.employee.give_raise(10000)
self.assertEqual(self.employee.salary, 20000)
print('test_give_custom_raise')
if __name__ == '__main__':
unittest.main()
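# Note: the `employee` module is not included here. A minimal sketch that would
# satisfy these tests (the default raise amount of 5000 is inferred from the
# assertions above and is an assumption):
#
#   class Employee:
#       def __init__(self, first_name, last_name, salary):
#           self.first_name = first_name
#           self.last_name = last_name
#           self.salary = salary
#
#       def give_raise(self, amount=5000):
#           self.salary += amount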
| [
"[email protected]"
] | |
8be463a908ee6202a38eb8019c0ad190142115ff | 369bcbbdecf3163889358bd340c33b8ec53e52ae | /modules/python/models/train_distributed.py | 3000907a58e6413bcedee108cebf8f3ddab85339 | [
"MIT"
] | permissive | tianap/pepper | 47e9b22d834623a12cae243c85b88cf4b9b5d61e | 1c6d2cdad11489ca873b97566c15f70213ad2824 | refs/heads/master | 2021-02-13T20:28:39.506309 | 2020-02-24T07:42:57 | 2020-02-24T07:42:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,609 | py | import sys
import torch
import os
from tqdm import tqdm
import torch.distributed as dist
import torch.nn as nn
import torch.multiprocessing as mp
# Custom generator for our dataset
from torch.utils.data import DataLoader
from modules.python.models.dataloader import SequenceDataset
from modules.python.TextColor import TextColor
from modules.python.models.ModelHander import ModelHandler
from modules.python.models.test import test
from modules.python.Options import ImageSizeOptions, TrainOptions
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
"""
Train a model and return the model and optimizer trained.
Input:
- A train CSV containing training image set information (usually chr1-18)
Return:
- A trained model
"""
CLASS_WEIGHTS = [1.0, 1.0, 1.0, 1.0, 1.0]
def save_best_model(transducer_model, model_optimizer, hidden_size, layers, epoch,
file_name):
"""
Save the best model
:param transducer_model: A trained model
:param model_optimizer: Model optimizer
:param hidden_size: Number of hidden layers
:param layers: Number of GRU layers to use
:param epoch: Epoch/iteration number
:param file_name: Output file name
:return:
"""
if os.path.isfile(file_name):
os.remove(file_name)
ModelHandler.save_checkpoint({
'model_state_dict': transducer_model.state_dict(),
'model_optimizer': model_optimizer.state_dict(),
'hidden_size': hidden_size,
'gru_layers': layers,
'epochs': epoch,
}, file_name)
sys.stderr.write(TextColor.RED + "\nMODEL SAVED SUCCESSFULLY.\n" + TextColor.END)
def train(train_file, test_file, batch_size, epoch_limit, gpu_mode, num_workers, retrain_model,
retrain_model_path, gru_layers, hidden_size, lr, decay, model_dir, stats_dir, train_mode,
world_size, rank, device_id):
if train_mode is True and rank == 0:
train_loss_logger = open(stats_dir + "train_loss.csv", 'w')
test_loss_logger = open(stats_dir + "test_loss.csv", 'w')
confusion_matrix_logger = open(stats_dir + "confusion_matrix.txt", 'w')
else:
train_loss_logger = None
test_loss_logger = None
confusion_matrix_logger = None
torch.cuda.set_device(device_id)
if rank == 0:
sys.stderr.write(TextColor.PURPLE + 'Loading data\n' + TextColor.END)
train_data_set = SequenceDataset(train_file)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_data_set,
num_replicas=world_size,
rank=rank
)
train_loader = torch.utils.data.DataLoader(
dataset=train_data_set,
batch_size=batch_size,
shuffle=False,
num_workers=0,
pin_memory=True,
sampler=train_sampler)
num_classes = ImageSizeOptions.TOTAL_LABELS
if retrain_model is True:
if os.path.isfile(retrain_model_path) is False:
sys.stderr.write(TextColor.RED + "ERROR: INVALID PATH TO RETRAIN PATH MODEL --retrain_model_path\n")
exit(1)
sys.stderr.write(TextColor.GREEN + "INFO: RETRAIN MODEL LOADING\n" + TextColor.END)
transducer_model, hidden_size, gru_layers, prev_ite = \
ModelHandler.load_simple_model_for_training(retrain_model_path,
input_channels=ImageSizeOptions.IMAGE_CHANNELS,
image_features=ImageSizeOptions.IMAGE_HEIGHT,
seq_len=ImageSizeOptions.SEQ_LENGTH,
num_classes=num_classes)
if train_mode is True:
epoch_limit = prev_ite + epoch_limit
sys.stderr.write(TextColor.GREEN + "INFO: RETRAIN MODEL LOADED\n" + TextColor.END)
else:
transducer_model = ModelHandler.get_new_gru_model(input_channels=ImageSizeOptions.IMAGE_CHANNELS,
image_features=ImageSizeOptions.IMAGE_HEIGHT,
gru_layers=gru_layers,
hidden_size=hidden_size,
num_classes=num_classes)
prev_ite = 0
param_count = sum(p.numel() for p in transducer_model.parameters() if p.requires_grad)
if rank == 0:
sys.stderr.write(TextColor.RED + "INFO: TOTAL TRAINABLE PARAMETERS:\t" + str(param_count) + "\n" + TextColor.END)
model_optimizer = torch.optim.Adam(transducer_model.parameters(), lr=lr, weight_decay=decay)
if retrain_model is True:
sys.stderr.write(TextColor.GREEN + "INFO: OPTIMIZER LOADING\n" + TextColor.END)
model_optimizer = ModelHandler.load_simple_optimizer(model_optimizer, retrain_model_path, gpu_mode)
sys.stderr.write(TextColor.GREEN + "INFO: OPTIMIZER LOADED\n" + TextColor.END)
if gpu_mode:
transducer_model = transducer_model.to(device_id)
transducer_model = nn.parallel.DistributedDataParallel(transducer_model, device_ids=[device_id])
class_weights = torch.Tensor(CLASS_WEIGHTS)
# Loss
criterion = nn.CrossEntropyLoss(class_weights)
if gpu_mode is True:
criterion = criterion.to(device_id)
start_epoch = prev_ite
# Train the Model
if rank == 0:
sys.stderr.write(TextColor.PURPLE + 'Training starting\n' + TextColor.END)
sys.stderr.write(TextColor.BLUE + 'Start: ' + str(start_epoch + 1) + ' End: ' + str(epoch_limit) + "\n")
stats = dict()
stats['loss_epoch'] = []
stats['accuracy_epoch'] = []
for epoch in range(start_epoch, epoch_limit, 1):
total_loss = 0
total_images = 0
if rank == 0:
sys.stderr.write(TextColor.BLUE + 'Train epoch: ' + str(epoch + 1) + "\n")
# make sure the model is in train mode. BN is different in train and eval.
batch_no = 1
if rank == 0:
progress_bar = tqdm(
total=len(train_loader),
ncols=100,
leave=True,
position=rank,
desc="Loss: ",
)
else:
progress_bar = None
transducer_model.train()
for images, labels in train_loader:
labels = labels.type(torch.LongTensor)
images = images.type(torch.FloatTensor)
if gpu_mode:
images = images.to(device_id)
labels = labels.to(device_id)
hidden = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)
if gpu_mode:
hidden = hidden.to(device_id)
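            # Slide a fixed-size window over the sequence; gradients are
            # truncated at window boundaries because the hidden state is
            # detached after each optimizer step below.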
for i in range(0, ImageSizeOptions.SEQ_LENGTH, TrainOptions.WINDOW_JUMP):
model_optimizer.zero_grad()
if i + TrainOptions.TRAIN_WINDOW > ImageSizeOptions.SEQ_LENGTH:
break
image_chunk = images[:, i:i+TrainOptions.TRAIN_WINDOW]
label_chunk = labels[:, i:i+TrainOptions.TRAIN_WINDOW]
output_, hidden = transducer_model(image_chunk, hidden)
loss = criterion(output_.contiguous().view(-1, num_classes), label_chunk.contiguous().view(-1))
loss.backward()
model_optimizer.step()
total_loss += loss.item()
total_images += image_chunk.size(0)
hidden = hidden.detach()
# update the progress bar
avg_loss = (total_loss / total_images) if total_images else 0
if train_mode is True and rank == 0:
train_loss_logger.write(str(epoch + 1) + "," + str(batch_no) + "," + str(avg_loss) + "\n")
if rank == 0:
progress_bar.set_description("Loss: " + str(avg_loss))
progress_bar.refresh()
progress_bar.update(1)
batch_no += 1
if rank == 0:
progress_bar.close()
dist.barrier()
if rank == 0:
            stats_dictionary = test(test_file, batch_size, gpu_mode, transducer_model, num_workers,
gru_layers, hidden_size, num_classes=ImageSizeOptions.TOTAL_LABELS)
            stats['loss'] = stats_dictionary['loss']
            stats['accuracy'] = stats_dictionary['accuracy']
            stats['loss_epoch'].append((epoch, stats_dictionary['loss']))
            stats['accuracy_epoch'].append((epoch, stats_dictionary['accuracy']))
dist.barrier()
# update the loggers
if train_mode is True and rank == 0:
# save the model after each epoch
# encoder_model, decoder_model, encoder_optimizer, decoder_optimizer, hidden_size, layers, epoch,
# file_name
save_best_model(transducer_model, model_optimizer,
hidden_size, gru_layers, epoch, model_dir + "_epoch_" + str(epoch + 1) + '_checkpoint.pkl')
test_loss_logger.write(str(epoch + 1) + "," + str(stats['loss']) + "," + str(stats['accuracy']) + "\n")
            confusion_matrix_logger.write(str(epoch + 1) + "\n" + str(stats_dictionary['confusion_matrix']) + "\n")
train_loss_logger.flush()
test_loss_logger.flush()
confusion_matrix_logger.flush()
elif train_mode is False:
# this setup is for hyperband
if epoch + 1 >= 10 and stats['accuracy'] < 98:
                sys.stderr.write(TextColor.PURPLE + 'EARLY STOPPING AS THE MODEL IS NOT DOING WELL\n' + TextColor.END)
return transducer_model, model_optimizer, stats
if rank == 0:
sys.stderr.write(TextColor.PURPLE + 'Finished training\n' + TextColor.END)
return transducer_model, model_optimizer, stats
def cleanup():
dist.destroy_process_group()
def setup(rank, device_ids, args):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=len(device_ids))
train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, \
retrain_model_path, gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, total_callers, \
train_mode = args
# issue with semaphore lock: https://github.com/pytorch/pytorch/issues/2517
# mp.set_start_method('spawn')
# Explicitly setting seed to make sure that models created in two processes
# start from same random weights and biases. https://github.com/pytorch/pytorch/issues/2517
torch.manual_seed(42)
train(train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, retrain_model_path,
gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, train_mode,
total_callers, rank, device_ids[rank])
cleanup()
def train_distributed(train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model,
retrain_model_path, gru_layers, hidden_size, learning_rate, weight_decay, model_dir,
stats_dir, device_ids, total_callers, train_mode):
args = (train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model,
retrain_model_path, gru_layers, hidden_size, learning_rate, weight_decay, model_dir,
stats_dir, total_callers, train_mode)
mp.spawn(setup,
args=(device_ids, args),
nprocs=len(device_ids),
join=True)
| [
"[email protected]"
] | |
31c31dd70f600c146fbe4ca24233f4d5559ece54 | ffbce1ba05737fc8403e91a565bd64b72f7c3ab9 | /.history/vicky/views_20210512104007.py | 995108e3690c927c1bc9fc0cdfe4fa44e10b7300 | [] | no_license | Crixtiancub/Feedback_Vicky2.0 | ffa1f9af8e81abb55540dbb10e3324866d23be2d | bc97740eba40c1edcacc309b8206f94fdee62cb6 | refs/heads/master | 2023-04-12T05:39:21.113454 | 2021-05-19T21:54:28 | 2021-05-19T21:54:28 | 368,983,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | from django.shortcuts import render, redirect
import os
from modelo_IA.archivos_Python.pipeline import Pipeline
from vicky import templates
from .models import *
from django.template.response import TemplateResponse
model = Pipeline()
# Create your views here.
def home(request):
num_visits = request.session.get('num_visits', 3)
if num_visits == 0:
request.session['num_visits'] = 3
if 'Si' in request.POST:
acierto = str(request.POST.get('Si'))
pregunta = str(request.POST.get('retorno_Pregunta'))
respuesta = str(request.POST.get('retorno_Respuesta'))
envio_Pregunta = preguntas_Vicky(
pregunta=pregunta,
respuesta=respuesta,
acierto= acierto
)
envio_Pregunta.save()
print("Pregunta Saved...")
context = {
'num_visits': num_visits,
}
return TemplateResponse(request, 'noVisitas.html')
if 'No' in request.POST:
acierto = str(request.POST.get('No'))
pregunta = str(request.POST.get('retorno_Pregunta'))
respuesta = str(request.POST.get('retorno_Respuesta'))
envio_Pregunta = preguntas_Vicky(
pregunta=pregunta,
respuesta=respuesta,
acierto= acierto
)
envio_Pregunta.save()
print("Pregunta Saved...")
context = {
'num_visits': num_visits,
}
return TemplateResponse(request, 'noVisitas.html')
pregunta = str(request.POST.get('pregunta_Vicky'))
respuesta = model.run_model(pregunta)
context = {
"pregunta":pregunta,
"respuesta":respuesta,
'num_visits': num_visits,
}
return TemplateResponse(request, 'dashboard.html' , context)
if request.POST:
if 'Si' in request.POST:
request.session['num_visits'] -= 1
acierto = str(request.POST.get('Si'))
pregunta = str(request.POST.get('retorno_Pregunta'))
respuesta = str(request.POST.get('retorno_Respuesta'))
envio_Pregunta = preguntas_Vicky(
pregunta=pregunta,
respuesta=respuesta,
acierto= acierto
)
envio_Pregunta.save()
print("Pregunta Saved...")
context = {
'num_visits': num_visits,
}
return TemplateResponse(request, 'dashboard.html' , context)
if 'No' in request.POST:
request.session['num_visits'] -= 1
acierto = str(request.POST.get('No'))
pregunta = str(request.POST.get('retorno_Pregunta'))
respuesta = str(request.POST.get('retorno_Respuesta'))
envio_Pregunta = preguntas_Vicky(
pregunta=pregunta,
respuesta=respuesta,
acierto= acierto
)
envio_Pregunta.save()
print("Pregunta Saved...")
context = {
'num_visits': num_visits,
}
print(context)
return TemplateResponse(request, 'dashboard.html' , context)
# pregunta = str(request.POST.get('retorno_Pregunta'))
# respuesta = str(request.POST.get('retorno_Respuesta'))
pregunta = str(request.POST.get('pregunta_Vicky'))
respuesta = model.run_model(pregunta)
context = {
"pregunta":pregunta,
"respuesta":respuesta,
'num_visits': num_visits,
}
print(context)
return TemplateResponse(request, 'dashboard.html' , context)
else:
contexto = {
'num_visits': num_visits,
}
return render(request, 'dashboard.html', contexto)
def noVisitas(request):
return render(request, 'noVisitas.html')
def respuesta_Vicky(request):
pregunta = str(request.POST['pregunta_Vicky'])
respuesta = model.run_model(pregunta)
context = {
"pregunta":pregunta,
"respuesta":respuesta
}
return render(request, 'respuesta_Vicky.html', context) | [
"[email protected]"
] | |
64ccbfab35ff229a7e7c14c48ddff5226a783295 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/bpycollada/import_collada.py | 1bb5360d157006d7285bc4d686a2d5d8c0514da8 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,215 | py | import os
import math
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
import bpy
from bpy.ops import BPyOpsSubModOp
from bpy_extras.image_utils import load_image
from mathutils import Matrix, Vector
from collada import Collada
from collada.camera import PerspectiveCamera, OrthographicCamera
from collada.common import DaeBrokenRefError
from collada.light import AmbientLight, DirectionalLight, PointLight, SpotLight
from collada.material import Map
from collada.polylist import Polylist, BoundPolylist
from collada.primitive import BoundPrimitive
from collada.scene import Scene, Node, NodeNode, GeometryNode
from collada.triangleset import TriangleSet, BoundTriangleSet
__all__ = ['load']
VENDOR_SPECIFIC = []
COLLADA_NS = 'http://www.collada.org/2005/11/COLLADASchema'
DAE_NS = {'dae': COLLADA_NS}
TRANSPARENCY_RAY_DEPTH = 8
MAX_NAME_LENGTH = 27
def load(op, ctx, filepath=None, **kwargs):
c = Collada(filepath, ignore=[DaeBrokenRefError])
impclass = get_import(c)
imp = impclass(ctx, c, os.path.dirname(filepath), **kwargs)
tf = kwargs['transformation']
with prevented_updates(ctx):
if tf in ('MUL', 'APPLY'):
for i, obj in enumerate(c.scene.objects('geometry')):
b_geoms = imp.geometry(obj)
if tf == 'MUL':
tf_mat = Matrix(obj.matrix)
for b_obj in b_geoms:
b_obj.matrix_world = tf_mat
elif tf == 'PARENT':
_dfs(c.scene, imp.node)
for i, obj in enumerate(c.scene.objects('light')):
imp.light(obj, i)
for obj in c.scene.objects('camera'):
imp.camera(obj)
return {'FINISHED'}
@contextmanager
def prevented_updates(ctx):
""" Stop Blender from funning scene update for each change. Update it
just once the import is finished. """
scene_update = BPyOpsSubModOp._scene_update
setattr(BPyOpsSubModOp, '_scene_update', lambda ctx: None)
yield
setattr(BPyOpsSubModOp, '_scene_update', scene_update)
BPyOpsSubModOp._scene_update(ctx)
def get_import(collada):
for i in VENDOR_SPECIFIC:
if i.match(collada):
return i
return ColladaImport
class ColladaImport(object):
""" Standard COLLADA importer. """
def __init__(self, ctx, collada, basedir, **kwargs):
self._ctx = ctx
self._collada = collada
self._kwargs = kwargs
self._images = {}
self._namecount = 0
self._names = {}
def camera(self, bcam):
bpy.ops.object.add(type='CAMERA')
b_obj = self._ctx.object
b_obj.name = self.name(bcam.original, id(bcam))
b_obj.matrix_world = Matrix(bcam.matrix)
b_cam = b_obj.data
if isinstance(bcam.original, PerspectiveCamera):
b_cam.type = 'PERSP'
prop = b_cam.bl_rna.properties.get('lens_unit')
if 'DEGREES' in prop.enum_items:
b_cam.lens_unit = 'DEGREES'
elif 'FOV' in prop.enum_items:
b_cam.lens_unit = 'FOV'
else:
b_cam.lens_unit = prop.default
b_cam.angle = math.radians(max(
bcam.xfov or bcam.yfov,
bcam.yfov or bcam.xfov))
elif isinstance(bcam.original, OrthographicCamera):
b_cam.type = 'ORTHO'
b_cam.ortho_scale = max(
bcam.xmag or bcam.ymag,
bcam.ymag or bcam.xmag)
if bcam.znear:
b_cam.clip_start = bcam.znear
if bcam.zfar:
b_cam.clip_end = bcam.zfar
def geometry(self, bgeom):
b_materials = {}
for sym, matnode in bgeom.materialnodebysymbol.items():
mat = matnode.target
b_matname = self.name(mat)
if b_matname not in bpy.data.materials:
b_matname = self.material(mat, b_matname)
b_materials[sym] = bpy.data.materials[b_matname]
primitives = bgeom.original.primitives
if self._transform('APPLY'):
primitives = bgeom.primitives()
b_geoms = []
for i, p in enumerate(primitives):
if isinstance(p, BoundPrimitive):
b_mat_key = p.original.material
else:
b_mat_key = p.material
b_mat = b_materials.get(b_mat_key, None)
b_meshname = self.name(bgeom.original, i)
if isinstance(p, (TriangleSet, BoundTriangleSet)):
b_mesh = self.geometry_triangleset(
p, b_meshname, b_mat)
elif isinstance(p, (Polylist, BoundPolylist)):
b_mesh = self.geometry_triangleset(
p.triangleset(), b_meshname, b_mat)
else:
continue
if not b_mesh:
continue
b_obj = bpy.data.objects.new(b_meshname, b_mesh)
b_obj.data = b_mesh
self._ctx.scene.objects.link(b_obj)
self._ctx.scene.objects.active = b_obj
if len(b_obj.material_slots) == 0:
bpy.ops.object.material_slot_add()
b_obj.material_slots[0].link = 'OBJECT'
b_obj.material_slots[0].material = b_mat
b_obj.active_material = b_mat
if self._transform('APPLY'):
# TODO import normals
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.mode_set(mode='OBJECT')
b_geoms.append(b_obj)
return b_geoms
def geometry_triangleset(self, triset, b_name, b_mat):
if not self._transform('APPLY') and b_name in bpy.data.meshes:
# with applied transformation, mesh reuse is not possible
return bpy.data.meshes[b_name]
else:
if triset.vertex_index is None or \
not len(triset.vertex_index):
return
b_mesh = bpy.data.meshes.new(b_name)
b_mesh.vertices.add(len(triset.vertex))
b_mesh.tessfaces.add(len(triset))
for i, vertex in enumerate(triset.vertex):
b_mesh.vertices[i].co = vertex
# eekadoodle
eekadoodle_faces = [v
for f in triset.vertex_index
for v in _eekadoodle_face(*f)]
b_mesh.tessfaces.foreach_set(
'vertices_raw', eekadoodle_faces)
has_normal = (triset.normal_index is not None)
has_uv = (len(triset.texcoord_indexset) > 0)
if has_normal:
# TODO import normals
for i, f in enumerate(b_mesh.tessfaces):
f.use_smooth = not _is_flat_face(
triset.normal[triset.normal_index[i]])
if has_uv:
for j in range(len(triset.texcoord_indexset)):
self.texcoord_layer(
triset,
triset.texcoordset[j],
triset.texcoord_indexset[j],
b_mesh,
b_mat)
b_mesh.update()
return b_mesh
def texcoord_layer(self, triset, texcoord, index, b_mesh, b_mat):
b_mesh.uv_textures.new()
for i, f in enumerate(b_mesh.tessfaces):
t1, t2, t3 = index[i]
tface = b_mesh.tessface_uv_textures[-1].data[i]
# eekadoodle
if triset.vertex_index[i][2] == 0:
t1, t2, t3 = t3, t1, t2
tface.uv1 = texcoord[t1]
tface.uv2 = texcoord[t2]
tface.uv3 = texcoord[t3]
def light(self, light, i):
if isinstance(light.original, AmbientLight):
return
b_name = self.name(light.original, i)
if b_name not in bpy.data.lamps:
if isinstance(light.original, DirectionalLight):
b_lamp = bpy.data.lamps.new(b_name, type='SUN')
elif isinstance(light.original, PointLight):
b_lamp = bpy.data.lamps.new(b_name, type='POINT')
b_obj = bpy.data.objects.new(b_name, b_lamp)
self._ctx.scene.objects.link(b_obj)
b_obj.matrix_world = Matrix.Translation(light.position)
elif isinstance(light.original, SpotLight):
b_lamp = bpy.data.lamps.new(b_name, type='SPOT')
def material(self, mat, b_name):
effect = mat.effect
b_mat = bpy.data.materials.new(b_name)
b_name = b_mat.name
b_mat.diffuse_shader = 'LAMBERT'
getattr(self, 'rendering_' + \
effect.shadingtype)(mat, b_mat)
bpy.data.materials[b_name].use_transparent_shadows = \
self._kwargs.get('transparent_shadows', False)
if effect.emission:
b_mat.emit = sum(effect.emission[:3]) / 3.0
self.rendering_transparency(effect, b_mat)
self.rendering_reflectivity(effect, b_mat)
return b_name
def node(self, node, parent):
if isinstance(node, (Node, NodeNode)):
b_obj = bpy.data.objects.new(self.name(node), None)
b_obj.matrix_world = Matrix(node.matrix)
self._ctx.scene.objects.link(b_obj)
if parent:
b_obj.parent = parent
parent = b_obj
elif isinstance(node, GeometryNode):
for bgeom in node.objects('geometry'):
b_geoms = self.geometry(bgeom)
for b_obj in b_geoms:
b_obj.parent = parent
return parent
def rendering_blinn(self, mat, b_mat):
effect = mat.effect
b_mat.specular_shader = 'BLINN'
self.rendering_diffuse(effect.diffuse, b_mat)
self.rendering_specular(effect, b_mat)
def rendering_constant(self, mat, b_mat):
b_mat.use_shadeless = True
def rendering_lambert(self, mat, b_mat):
effect = mat.effect
self.rendering_diffuse(effect.diffuse, b_mat)
b_mat.specular_intensity = 0.0
def rendering_phong(self, mat, b_mat):
effect = mat.effect
b_mat.specular_shader = 'PHONG'
self.rendering_diffuse(effect.diffuse, b_mat)
self.rendering_specular(effect, b_mat)
def rendering_diffuse(self, diffuse, b_mat):
b_mat.diffuse_intensity = 1.0
diff = self.color_or_texture(diffuse, b_mat)
if isinstance(diff, tuple):
b_mat.diffuse_color = diff
else:
diff.use_map_color_diffuse = True
def rendering_specular(self, effect, b_mat):
if effect.specular:
b_mat.specular_intensity = 1.0
b_mat.specular_color = effect.specular[:3]
if effect.shininess:
b_mat.specular_hardness = effect.shininess
def rendering_reflectivity(self, effect, b_mat):
if effect.reflectivity and effect.reflectivity > 0:
b_mat.raytrace_mirror.use = True
b_mat.raytrace_mirror.reflect_factor = effect.reflectivity
if effect.reflective:
refi = self.color_or_texture(effect.reflective, b_mat)
if isinstance(refi, tuple):
b_mat.mirror_color = refi
else:
# TODO use_map_mirror or use_map_raymir ?
pass
def rendering_transparency(self, effect, b_mat):
if not effect.transparency:
return
if isinstance(effect.transparency, float):
if effect.transparency < 1.0:
b_mat.use_transparency = True
b_mat.alpha = effect.transparency
if self._kwargs.get('raytrace_transparency', False):
b_mat.transparency_method = 'RAYTRACE'
b_mat.raytrace_transparency.ior = 1.0
b_mat.raytrace_transparency.depth = TRANSPARENCY_RAY_DEPTH
if isinstance(effect.index_of_refraction, float):
b_mat.transparency_method = 'RAYTRACE'
b_mat.raytrace_transparency.ior = effect.index_of_refraction
b_mat.raytrace_transparency.depth = TRANSPARENCY_RAY_DEPTH
def color_or_texture(self, color_or_texture, b_mat):
if isinstance(color_or_texture, Map):
image = color_or_texture.sampler.surface.image
mtex = self.try_texture(image, b_mat)
return mtex or (1., 0., 0.)
elif isinstance(color_or_texture, tuple):
return color_or_texture[:3]
def try_texture(self, c_image, b_mat):
mtex = None
with self._tmpwrite(c_image.path, c_image.data) as tmp:
image = load_image(tmp)
if image is not None:
image.pack(True)
texture = bpy.data.textures.new(name='Kd', type='IMAGE')
texture.image = image
mtex = b_mat.texture_slots.add()
mtex.texture_coords = 'UV'
mtex.texture = texture
self._images[b_mat.name] = image
return mtex
def name(self, obj, index=0):
""" Trying to get efficient and human readable name, workarounds
Blender's object name limitations.
"""
if hasattr(obj, 'id'):
uid = obj.id.replace('material', 'm')
else:
self._namecount += 1
uid = 'Untitled.' + str(self._namecount)
base = '%s-%d' % (uid, index)
if base not in self._names:
self._namecount += 1
self._names[base] = '%s-%.4d' % (base[:MAX_NAME_LENGTH], self._namecount)
return self._names[base]
@contextmanager
def _tmpwrite(self, relpath, data):
with NamedTemporaryFile(suffix='.' + relpath.split('.')[-1]) as out:
out.write(data)
out.flush()
yield out.name
def _transform(self, t):
return self._kwargs['transformation'] == t
class SketchUpImport(ColladaImport):
""" SketchUp specific COLLADA import. """
def rendering_diffuse(self, diffuse, b_mat):
""" Imports PNG textures with alpha channel. """
ColladaImport.rendering_diffuse(self, diffuse, b_mat)
if isinstance(diffuse, Map):
if b_mat.name in self._images:
image = self._images[b_mat.name]
if image.depth == 32:
diffslot = None
for ts in b_mat.texture_slots:
if ts and ts.use_map_color_diffuse:
diffslot = ts
break
if not diffslot:
return
image.use_alpha = True
diffslot.use_map_alpha = True
tex = diffslot.texture
tex.use_mipmap = True
tex.use_interpolation = True
tex.use_alpha = True
b_mat.use_transparency = True
b_mat.alpha = 0.0
if self._kwargs.get('raytrace_transparency', False):
b_mat.transparency_method = 'RAYTRACE'
b_mat.raytrace_transparency.ior = 1.0
b_mat.raytrace_transparency.depth = TRANSPARENCY_RAY_DEPTH
def rendering_phong(self, mat, b_mat):
super().rendering_lambert(mat, b_mat)
def rendering_reflectivity(self, effect, b_mat):
""" There are no reflectivity controls in SketchUp """
if not self.__class__.test2(effect.xmlnode):
ColladaImport.rendering_reflectivity(self, effect, b_mat)
@classmethod
def match(cls, collada):
xml = collada.xmlnode
return cls.test1(xml) or cls.test2(xml)
@classmethod
def test1(cls, xml):
src = [xml.find('.//dae:instance_visual_scene',
namespaces=DAE_NS).get('url')]
at = xml.find('.//dae:authoring_tool', namespaces=DAE_NS)
if at is not None:
src.append(at.text)
return any(['SketchUp' in s for s in src if s])
@classmethod
def test2(cls, xml):
et = xml.findall('.//dae:extra/dae:technique',
namespaces=DAE_NS)
return len(et) and any([
t.get('profile') == 'GOOGLEEARTH'
for t in et])
VENDOR_SPECIFIC.append(SketchUpImport)
def _is_flat_face(normal):
a = Vector(normal[0])
for n in normal[1:]:
dp = a.dot(Vector(n))
if dp < 0.99999 or dp > 1.00001:
return False
return True
def _eekadoodle_face(v1, v2, v3):
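    # This appears to be the usual "eekadoodle" workaround for Blender's legacy
    # tessfaces: a face whose last vertex index is 0 is misinterpreted, so the
    # vertex order is rotated whenever v3 == 0.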
return v3 == 0 and (v3, v1, v2, 0) or (v1, v2, v3, 0)
def _children(node):
if isinstance(node, Scene):
return node.nodes
elif isinstance(node, Node):
return node.children
elif isinstance(node, NodeNode):
return node.node.children
else:
return []
def _dfs(node, cb, parent=None):
""" Depth first search taking a callback function.
Its return value will be passed recursively as a parent argument.
:param node: COLLADA node
:param callable cb:
"""
parent = cb(node, parent)
for child in _children(node):
_dfs(child, cb, parent)
| [
"[email protected]"
] | |
8cd818444c27b4f781c9fcb4f938acbe5fb6b0aa | 97c5af22476b1efb5cacb91686a2e704f4249437 | /Topic/recieve_logs_topic.py | 49400f7daf78dbb7fb53cfd52637148c4d8c228d | [] | no_license | mcclayac/RabbitMQ_python | 5b3a6f8604123901eb63878c5a37e216d7e6c60b | f2a743b1187499bc79a6b0045b508f92c457243b | refs/heads/master | 2021-01-11T02:50:12.542891 | 2016-10-15T13:00:57 | 2016-10-15T13:00:57 | 70,917,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | __author__ = 'anthonymcclay'
__project__ = 'RabbitMQ'
__date__ = '10/15/16'
__revision__ = '$'
__revision_date__ = '$'
#!/usr/bin/env python
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='topic_logs',
type='topic')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
binding_keys = sys.argv[1:]
if not binding_keys:
sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
sys.exit(1)
for binding_key in binding_keys:
channel.queue_bind(exchange='topic_logs',
queue=queue_name,
routing_key=binding_key)
print(' [*] Waiting for logs. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] %r:%r" % (method.routing_key, body))
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming() | [
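# Example invocation (binding keys use the AMQP topic syntax, as in the
# RabbitMQ topics tutorial this script follows; the values are illustrative):
#   python recieve_logs_topic.py "kern.*" "*.critical"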
"[email protected]"
] | |
5048510bf141c45aed2624550198d8896bcd32cb | a6ccd2da44e4b1d91e562dc68ad49f8396f8bd4c | /main.py | a6b37d1827596dec7fc6ac48f55d6b5aafc746fa | [] | no_license | Zarreen-Davis/Password_Generator | 39cd1cebd5837b97b7eb16d340f5719a5f064a06 | 774ac2e3b47956464efe08ba9acec339863847fe | refs/heads/master | 2023-01-02T08:39:45.228744 | 2020-10-27T18:57:06 | 2020-10-27T18:57:06 | 307,798,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | #enter and reenter password to ensure they match
password= input ("Please create a password: ")
password_2 = input ("Please re-enter you password: ")
#if passwords don't match an invalid message is displayed
if password != password_2:
print("Invalid! These passwords don't match.")
#checks password length is valid. Must be more than 6 characters but less than 20 characters
if len(password_2) <6:
print("Invalid! Password must be more than 6 characters.")
elif len(password_2) >20:
print("Invalid! Password must be less than 20 characters.")
#weak password contains just lower and/or uppercase characters
#medium strength password contains lowercase, uppercase and numbers
#strong password contains uppercase and lowercase characters, number and symbols
lowercase_found = 0
uppercase_found = 0
digit_found = 0
Specialchar_found = 0
for char in password_2:
if char.islower():
lowercase_found = 1
if char.isupper():
uppercase_found = 1
if char.isdigit():
digit_found = 1
if char=='$'or char=='#' or char=='%' or char=="!":
Specialchar_found = 1
if lowercase_found and uppercase_found and digit_found and Specialchar_found:
break
password_strength = lowercase_found + uppercase_found + digit_found + Specialchar_found
if password_strength == 1:
print("You have entered a weak password!")
elif password_strength ==2:
print("You have entered a weak password!")
elif password_strength ==3:
print("You have entered a medium strength password.")
else:
print("You have entered a strong password.") | [
"[email protected]"
] | |
33b7fafb2c489da4c821b0f2d6c5bd2e4a4f86d4 | 28e6ef83fa54713c70a3f0115d390ddfa91d00a1 | /mmfeat/miner/freesound.py | e3ac502f630f5960767499f4963d54bf5d05871e | [
"BSD-3-Clause"
] | permissive | braingineer/mmfeat | 11029fb680460a242ce71807a2d22b31adda6c35 | fdb59c4f60f5d910722ab72b469b7e317046e2d2 | refs/heads/master | 2020-12-27T15:15:05.376984 | 2016-08-15T15:23:27 | 2016-08-15T15:23:27 | 66,103,807 | 1 | 0 | null | 2016-08-19T18:27:23 | 2016-08-19T18:27:21 | Python | UTF-8 | Python | false | false | 3,181 | py | '''
FreeSound API miner
'''
import os
import requests
import shutil
import time
import urllib
from .base import BaseMiner
class FreeSoundResult(object):
def __init__(self, result, api_key):
self.ID = result['id']
self.format_url = 'http://www.freesound.org/apiv2/sounds/{}/?token={}'
self.url = self.format_url.format(self.ID, api_key)
self.format = 'audio/ogg' # FreeSound default
class FreeSoundMiner(BaseMiner):
def __init__(self, save_dir, config_path='./miner.yaml'):
super(FreeSoundMiner, self).__init__(save_dir, config_path)
self.__engine__ = 'freesound'
self.api_keys = self.config['freesound']['api-keys']
self.format_url = 'http://www.freesound.org/apiv2/search/text/?fields=id&{}'
self.page_size = 150 # maximum
def getUrl(self, query, limit, offset):
filters = {
'tag': query,
'duration': '[0 TO 120]'
}
self.filter = ' '.join([key + ':' + filters[key] for key in filters])
full_query = urllib.urlencode({
'token': self.api_keys[self.cur_api_key],
'page_size': limit,
'page': offset,
'filter': self.filter
})
return self.format_url.format(full_query)
def _search(self, query, limit=20, offset=1):
url = self.getUrl(query, limit, offset)
print url
r = requests.get(url)
try:
results = r.json()
except ValueError:
print('ERR: Request returned with code %s (%s)' % (r.status_code, r.text))
time.sleep(self.sleep_time)
print results
return [FreeSoundResult(res, self.api_keys[self.cur_api_key]) \
for res in results['results']], results['next'] is not None
def search(self, query, limit=20):
page = 1
results, isMoreLeft = self._search(query, limit, page)
page = 2
while isMoreLeft and len(results) < limit:
print page, isMoreLeft
max = limit - len(results)
more_results, isMoreLeft = self._search(query, max, page)
results += more_results
page += 1
return results
def saveFile(self, result):
'''
        result: result object (FreeSoundResult)
'''
if result.format in ['audio/ogg']:
format = 'ogg'
else: # unknown format, skipping
return None
fname = '%s.%s' % (result.ID, format)
path = '%s/%s' % (self.save_dir, fname)
if os.path.exists(path):
print('%s - already exists' % fname)
return
# download the file (specifically, the high-quality ogg preview)
try:
r = requests.get(result.url, timeout=20)
response = r.json()
r = requests.get(response['previews']['preview-hq-ogg'], \
stream=True, timeout=20)
r.raw.decode_content = True
with open(path, 'wb') as fw:
shutil.copyfileobj(r.raw, fw)
except:
print(fname, 'error saving')
return None
print(fname)
return fname
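# A minimal usage sketch (paths and the query are illustrative; `miner.yaml` is
# assumed to provide the `freesound: api-keys` list read in __init__):
#
#   miner = FreeSoundMiner('./sounds', config_path='./miner.yaml')
#   for result in miner.search('rain', limit=50):
#       miner.saveFile(result)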
| [
"[email protected]"
] | |
ea2f67c98f18b60516584d6e34cfb6f1083eee5d | 19a39bc9ec8c300f60e17a61cd4c6ec911842573 | /count_coins_and_bills.py | 3796cf18cdb818d95dbb4a39ea83cd1e52307568 | [] | no_license | caiosuzuki/conta-moedas-e-notas | 07f6d89b2be43eaa2508087de550e985e7822d86 | 3843d6c9e70c51b57f5666dfb307dc6601a70b5c | refs/heads/master | 2020-08-27T15:01:27.400193 | 2019-11-09T01:26:05 | 2019-11-09T01:26:05 | 217,414,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,968 | py | import cv2 as cv
import numpy as np
MATH_MORPH_KERNEL_SIZE = 3
BLUR_KERNEL_SIZE = 11
DEFAULT_INPUT_IMG_PATH = './notas-e-moedas-exemplo/0c2n-3.jpg'
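# The example file name appears to encode the expected counts ('0c2n' would be
# 0 coins and 2 bills, "notas"), matching the '{coins}c{bills}n' summary that
# count_coins_and_bills_in_image prints at the end.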
def calculate_area_of_rect(rect):
return rect[1][0] * rect[1][1]
def calculate_rect_ratio(rect):
width = rect[1][0]
height = rect[1][1]
if width > height:
return width / height
else:
return height / width
def count_coins_and_bills_in_image(filename, show_steps=False):
bgr_img = cv.imread(filename, 1)
img = cv.cvtColor(bgr_img, cv.COLOR_BGR2GRAY)
    # Resize the image so it stays viewable while the parameters are being tuned
img = cv.resize(img,None,fx=0.25,fy=0.25)
resized_img = img.copy()
if show_steps:
cv.imshow('0 - Resized Original Image', img)
    # Use blur to soften internal details of the bills and coins, such as numbers and drawings
img = cv.GaussianBlur(img, (BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), cv.BORDER_DEFAULT)
if show_steps:
cv.imshow('1 - Gaussian Blur', img)
    # Apply the Hough transform to find circles in the image
all_circles_found = cv.HoughCircles(img, cv.HOUGH_GRADIENT, dp=0.9, minDist=120, param1=10, param2=40, minRadius=30, maxRadius=60)
    # Find the circles that will be counted as coins
coins_in_image = 0
if all_circles_found is not None:
all_circles_found = np.uint16(np.around(all_circles_found))
for circle in all_circles_found[0, :]:
            # Draw the circle on the original image
cv.circle(resized_img, (circle[0], circle[1]), circle[2], (0, 255, 0), 5)
            # Each circle counts as one coin
coins_in_image += 1
if show_steps:
cv.imshow('2 - Drawn circles', resized_img)
    # Binarize the intensity values
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 11, 2)
if show_steps:
cv.imshow('3 - Adaptive Gaussian Thresholding', img)
    # Define the kernel used by the mathematical morphology operations
kernel = cv.getStructuringElement(cv.MORPH_RECT, (MATH_MORPH_KERNEL_SIZE, MATH_MORPH_KERNEL_SIZE))
    # Apply closing to seal the gaps that appear mostly in the bills,
    # preventing their parts from staying separated, which would keep them
    # from merging into the bill's "blob"
img = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel, iterations=5)
if show_steps:
cv.imshow('4 - Closing', img)
    # Dilate the small parts that make up each bill so they merge and the
    # bill rectangles can eventually be identified
img = cv.dilate(img, kernel, iterations = 12)
if show_steps:
cv.imshow('5 - Dilation', img)
bills_in_image = 0
    # Find the contours (joins nearby points of the same intensity)
contours, _ = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for contour in contours:
# Achando os retângulos que possivelmente são uma nota
rect = cv.minAreaRect(contour)
rect_area = calculate_area_of_rect(rect)
rect_ratio = calculate_rect_ratio(rect)
# Assumindo por critério de área e ratio do retângulo se ele é uma nota ou não
if (rect_area > 90000) and 1.2 < rect_ratio < 2.0:
            # Create the corresponding bounding box so it can be drawn on the image
box = cv.boxPoints(rect)
box = np.int0(box)
resized_img = cv.drawContours(resized_img, [box], 0, (0, 255, 0), 3)
bills_in_image += 1
if show_steps:
cv.imshow(f'Coins and bills: {filename}', resized_img)
cv.waitKey(0)
cv.destroyAllWindows()
print(f'{coins_in_image}c{bills_in_image}n')
return coins_in_image, bills_in_image
if __name__ == "__main__":
count_coins_and_bills_in_image(DEFAULT_INPUT_IMG_PATH, show_steps=True) | [
"[email protected]"
] | |
62cbf0a6b75da95841744b5f398c31d871e00a6d | 3591e440d74b5cd40a3fbfedb88ed103f22c9722 | /mysite/polls/migrations/0018_auto_20170425_0041.py | 8b8b6a6752bf58c30b261d59b5eb9ac85b23d893 | [] | no_license | larako/web_4A_S2 | 072b0c597e9771cb9d2a75fc70d89b0d08557a08 | 454a4ede3c588cd37e4ed1416e2ebd1b32c8c5a8 | refs/heads/master | 2021-01-22T22:08:15.722787 | 2017-04-28T15:17:48 | 2017-04-28T15:17:48 | 85,509,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | # Generated by Django 2.0.dev20170317160306 on 2017-04-25 00:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('polls', '0017_auto_20170424_2342'),
]
operations = [
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('purchased_at', models.DateTimeField(auto_now_add=True)),
('tx', models.CharField(max_length=250)),
('purchaser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.User')),
],
),
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('location', models.CharField(max_length=250)),
('price', models.DecimalField(decimal_places=2, max_digits=7)),
],
),
migrations.AddField(
model_name='purchase',
name='ressource',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Resource'),
),
]
| [
"[email protected]"
] | |
3bf874bd99dc884003dd3e452e0eeb8d382a3467 | 84b046d70ed94d8db9e8ea82798597c0e1ca7cd2 | /bionlp/data/document.py | a6f8f7a39971b25f022b751e38ec73ade2c124a8 | [
"MIT"
] | permissive | Honghan/ade_ann | 20ead66c051ad57903b7514979b2d624b47eb32e | 1cbbe47a1c6c2af3477f6c0c1398bae683c8d601 | refs/heads/master | 2021-04-30T23:22:39.465704 | 2016-11-04T11:49:50 | 2016-11-04T11:49:50 | 69,241,232 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | class Document:
def __init__(self,SentenceList,id=-1):
self.id=id
self.value=SentenceList
self.attr={}
def __str__(self):
s ="Document id: {0}, value: {1}, attributes: {2}".format(self.id,self.value,self.attr)
return s
| [
"[email protected]"
] | |
915fbfd3f6cb8e8d8b28b0a9abf61ad77b62bb96 | 6e758147e0cf2b9e7440f1b09daef4ad84132a81 | /mySite/offerStream/models.py | 9a7528b51f900cd7f66a6eda9d456097553e7068 | [] | no_license | joha0123/djangoProject | 31fed982dba4298cdc1e4b72e29e87064b1676dc | c700c9b65cd2404674bfd72506fa3e6824a46a6d | refs/heads/master | 2021-01-01T03:46:51.016060 | 2016-05-18T09:58:21 | 2016-05-18T09:58:21 | 58,851,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from django.db import models
class Adress(models.Model):
street = models.CharField(max_length=150)
city = models.CharField(max_length=100)
zipCode = models.CharField(max_length=10)
def __str__(self):
return "%s, %s %s" % (self.street, self.zipCode, self.city)
class Store(models.Model):
name = models.CharField(max_length=50)
description = models.TextField()
adress = models.OneToOneField(Adress, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Offer(models.Model):
creationDate = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=100)
detail = models.TextField()
store = models.ForeignKey(Store, on_delete=models.CASCADE)
def __str__(self):
return self.title
| [
"[email protected]"
] | |
e3e0d9bd3cd8849299f63bd764b5c02cc8ae2980 | 699d28b11032360f076ba21aa95ca00111bc97f6 | /calcs.py | 5586e5c93d8a887634807c41463a40fe5a90d03c | [] | no_license | kthyng/ai65 | 840058f8ee717df01a02d8177a94ce5a4cc714db | 5bebedd0033c64cb5a5cf64c8e2f9d9d827bfdd7 | refs/heads/master | 2020-04-05T12:35:02.211547 | 2019-07-28T15:47:54 | 2019-07-28T15:47:54 | 95,168,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,033 | py | '''
Calculations.
'''
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cmocean.cm as cmo
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import cartopy.feature as cfeature
import matplotlib.ticker as mticker
import netCDF4 as netCDF
import argparse
from glob import glob
import numpy as np
from cartopy.io import shapereader
import matplotlib as mpl
mpl.rcParams.update({'font.size': 10})
import scipy.io
import os
import matplotlib.dates as Dates
from plots import re
# set up
locgrid = '/pong/raid/kthyng/froude/ai65/grid.nc'
locmodel = '/pong/raid/kthyng/froude/ai65/OUT/'
grid = netCDF.Dataset(locgrid)
pm = grid['pm'][0,0]; pn = grid['pn'][0,0]
dx = pm**-1; dy = pn**-1
h = grid['h'][:]
lon_psi = grid['lon_psi'][:]; lat_psi = grid['lat_psi'][:]
lon_rho = grid['lon_rho'][:]; lat_rho = grid['lat_rho'][:]
dates = pd.date_range("2006-09-01", "2006-10-01", freq="15T")
ntimes = len(dates)
g = 9.81 # m/s^2
Cd = 3e-3 # friction coefficient
rho0 = 1023.7
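# Sketch of the quantities computed in run() below (H = h + zeta is the total
# water depth and u_n the velocity component normal to each transect):
#   PE flux (pressure work): rho0 * g * sum( H * u_n * zeta ) * dl
#   barotropic KE flux:      0.5 * rho0 * sum( H * u_n * (ubar**2 + vbar**2) ) * dl
#   KE flux from 3-D velocity ('bc'): 0.5 * rho0 * sum over z and l of dz * u_n * (u**2 + v**2) * dl
# with Pbt = PE + KEbt and Pbc = PE + KEbc on each of the four transects.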
def run(twest, teast, tnorth, tsouth, name, doterms=False):
PEeast = np.zeros(ntimes); KEbteast = np.zeros(ntimes); KEbceast = np.zeros(ntimes)
PEwest = np.zeros(ntimes); KEbtwest = np.zeros(ntimes); KEbcwest = np.zeros(ntimes)
PEnorth = np.zeros(ntimes); KEbtnorth = np.zeros(ntimes); KEbcnorth = np.zeros(ntimes)
PEsouth = np.zeros(ntimes); KEbtsouth = np.zeros(ntimes); KEbcsouth = np.zeros(ntimes)
Pbtin = np.zeros(ntimes); Pbtout = np.zeros(ntimes)
Pbcin = np.zeros(ntimes); Pbcout = np.zeros(ntimes)
Pbtwest = np.zeros(ntimes); Pbtnorth = np.zeros(ntimes)
Pbtsouth = np.zeros(ntimes); Pbteast = np.zeros(ntimes)
Pbcwest = np.zeros(ntimes); Pbcnorth = np.zeros(ntimes)
Pbcsouth = np.zeros(ntimes); Pbceast = np.zeros(ntimes)
if doterms:
Pmix = np.zeros(ntimes); Pfriction = np.zeros(ntimes)
Pmom = np.zeros(ntimes)
for i in range(ntimes):
m = netCDF.Dataset(locmodel + 'ocean_his_' + str(i+1).zfill(4) + '.nc')
# Calculate over power dissipation across inlet in time
# Want to have all of these calculations on the psi grid to be at cell edges
for vec in twest: # west
# in y direction need 2 extra indices: 1 for interpolating from rho to psi grid
# and 1 for including the end point
ubarwest = re(m['ubar'][0,vec[0]:vec[1]+2,vec[2]],0).squeeze()
# in y direction need 1 extra index for including the end point
# in x direction need 2 extra for interpolating from rho to psi grid
vbarwest = re(m['vbar'][0,vec[0]:vec[1]+1,vec[2]:vec[2]+2],1).squeeze()
uwest = re(m['u'][0,:, vec[0]:vec[1]+2,vec[2]],1).squeeze()
vwest = re(m['v'][0,:, vec[0]:vec[1]+1,vec[2]:vec[2]+2],2).squeeze()
# need to interpolate from rho to psi grid in both y and x
zetawest = re(re(m['zeta'][0,vec[0]:vec[1]+2,vec[2]:vec[2]+2], 1), 0)
Hwest = re(re(h[vec[0]:vec[1]+2,vec[2]:vec[2]+2], 1), 0) + zetawest # height
PEwest[i] = rho0 * g * dy * (Hwest * ubarwest * zetawest).sum() # potential energy anomaly, sum in y
KEbtwest[i] = 0.5 * rho0 * dy * (Hwest * ubarwest * (ubarwest**2 + vbarwest**2)).sum() # sum in y
dzwest = Hwest.squeeze()/20.
KEbcwest[i] = 0.5 * rho0 * dy * (dzwest * uwest * (uwest**2 + vwest**2)).sum() # sum in z and y
Pbtwest[i] = PEwest[i] + KEbtwest[i]
Pbcwest[i] = PEwest[i] + KEbcwest[i]
for vec in teast: # east
ubareast = re(m['ubar'][0,vec[0]:vec[1]+2,vec[2]],0).squeeze()
vbareast = re(m['vbar'][0,vec[0]:vec[1]+1,vec[2]:vec[2]+2],1).squeeze()
ueast = re(m['u'][0,:, vec[0]:vec[1]+2,vec[2]],1).squeeze()
veast = re(m['v'][0,:, vec[0]:vec[1]+1,vec[2]:vec[2]+2],2).squeeze()
zetaeast = re(re(m['zeta'][0,vec[0]:vec[1]+2,vec[2]:vec[2]+2], 1), 0)
Heast = re(re(h[vec[0]:vec[1]+2,vec[2]:vec[2]+2], 1), 0) + zetaeast # height
PEeast[i] = rho0 * g * dy * (Heast * ubareast * zetaeast).sum() # potential energy anomaly, sum in y
KEbteast[i] = 0.5 * rho0 * dy * (Heast * ubareast * (ubareast**2 + vbareast**2)).sum() # sum in y
dzeast = Heast.squeeze()/20.
KEbceast[i] = 0.5 * rho0 * dy * (dzeast * ueast * (ueast**2 + veast**2)).sum() # sum in z and y
Pbteast[i] = PEeast[i] + KEbteast[i]
Pbceast[i] = PEeast[i] + KEbceast[i]
for vec in tnorth: # north
vbarnorth = re(m['vbar'][0,vec[0],vec[1]:vec[2]+2],0).squeeze()
            # in y direction need 2 extra indices for interpolating from rho to psi grid
# in x direction 1 extra index to include end point
ubarnorth = re(m['ubar'][0,vec[0]:vec[0]+2,vec[1]:vec[2]+1],0).squeeze()
unorth = re(m['u'][0,:, vec[0]:vec[0]+2,vec[1]:vec[2]+1],1).squeeze()
vnorth = re(m['v'][0,:, vec[0],vec[1]:vec[2]+2],1).squeeze()
zetanorth = re(re(m['zeta'][0,vec[0]:vec[0]+2,vec[1]:vec[2]+2], 1), 0)
Hnorth = re(re(h[vec[0]:vec[0]+2,vec[1]:vec[2]+2],1),0) + zetanorth # height
PEnorth[i] = rho0 * g * dx * (Hnorth * vbarnorth * zetanorth).sum() # potential energy anomaly, sum in y
KEbtnorth[i] = 0.5 * rho0 * dx * (Hnorth * vbarnorth * (ubarnorth**2 + vbarnorth**2)).sum() # sum in y
dznorth = Hnorth.squeeze()/20.
KEbcnorth[i] = 0.5 * rho0 * dx * (dznorth * vnorth * (unorth**2 + vnorth**2)).sum() # sum in z and y
Pbtnorth[i] = PEnorth[i] + KEbtnorth[i]
Pbcnorth[i] = PEnorth[i] + KEbcnorth[i]
for vec in tsouth: # south
vbarsouth = re(m['vbar'][0,vec[0],vec[1]:vec[2]+2],0).squeeze()
ubarsouth = re(m['ubar'][0,vec[0]:vec[0]+2,vec[1]:vec[2]+1],0).squeeze()
usouth = re(m['u'][0,:, vec[0]:vec[0]+2,vec[1]:vec[2]+1],1).squeeze()
vsouth = re(m['v'][0,:, vec[0],vec[1]:vec[2]+2],1).squeeze()
zetasouth = re(re(m['zeta'][0,vec[0]:vec[0]+2,vec[1]:vec[2]+2], 1), 0)
Hsouth = re(re(h[vec[0]:vec[0]+2,vec[1]:vec[2]+2],1),0) + zetasouth # height
PEsouth[i] = rho0 * g * dx * (Hsouth * vbarsouth * zetasouth).sum() # potential energy anomaly, sum in y
KEbtsouth[i] = 0.5 * rho0 * dx * (Hsouth * vbarsouth * (ubarsouth**2 + vbarsouth**2)).sum() # sum in y
dzsouth = Hsouth.squeeze()/20.
KEbcsouth[i] = 0.5 * rho0 * dx * (dzsouth * vsouth * (usouth**2 + vsouth**2)).sum() # sum in z and y
Pbtsouth[i] = PEsouth[i] + KEbtsouth[i]
Pbcsouth[i] = PEsouth[i] + KEbcsouth[i]
if doterms:
## Calculate mixing across inlet in time (buoyancy production)
# in y direction +1 index at south to include rho inside of psi edge
# and +1 index at north to include end point. (Same for x)
AKs = m['AKs'][0, 1:-1, tsouth[0][0]+1:tnorth[0][0]+1, twest[0][2]+1:teast[0][2]+1] # vertical viscosity for tracer
rho = m['rho'][0, :, tsouth[0][0]+1:tnorth[0][0]+1, twest[0][2]+1:teast[0][2]+1] # density
zeta = m['zeta'][0,tsouth[0][0]+1:tnorth[0][0]+1, twest[0][2]+1:teast[0][2]+1]
dz = (h[tsouth[0][0]+1:tnorth[0][0]+1, twest[0][2]+1:teast[0][2]+1] + zeta)/20. # easy since layers are uniform
drhodz = (rho[:-1] - rho[1:])/dz
Pmix[i] = g * dy * dx * (AKs*drhodz*dz).sum()
##
## Calculate momentum across inlet in time (shear production)
AKv = m['AKv'][0, 1:-1, tsouth[0][0]+1:tnorth[0][0]+1, twest[0][2]+1:teast[0][2]+1] # vertical viscosity for momentum
u = re(m['u'][0, :, tsouth[0][0]+1:tnorth[0][0]+1, twest[0][2]:teast[0][2]+1],2)
v = re(m['v'][0, :, tsouth[0][0]:tnorth[0][0]+1, twest[0][2]+1:teast[0][2]+1],1)
dudz = (u[:-1] - u[1:])/dz
dvdz = (v[:-1] - v[1:])/dz
Pmom[i] = rho0 * dy * dx * (AKv*(dudz**2 + dvdz**2)*dz).sum()
##
## Calculate power dissipation due to bottom friction, across domain
# using velocity from bottom of water column
Pfriction[i] = Cd * rho0 * dy * dx * abs((u[0]**2 + v[0]**2)**(3/2)).sum()
##
df = pd.DataFrame(index=dates)
df['PEeast'] = PEeast; df['KEbteast'] = KEbteast; df['KEbceast'] = KEbceast
df['Pbteast'] = Pbteast; df['Pbceast'] = Pbceast;
df['PEwest'] = PEwest; df['KEbtwest'] = KEbtwest; df['KEbcwest'] = KEbcwest
df['Pbtwest'] = Pbtwest; df['Pbcwest'] = Pbcwest;
df['PEnorth'] = PEnorth; df['KEbtnorth'] = KEbtnorth; df['KEbcnorth'] = KEbcnorth
df['Pbtnorth'] = Pbtnorth; df['Pbcnorth'] = Pbcnorth;
df['PEsouth'] = PEsouth; df['KEbtsouth'] = KEbtsouth; df['KEbcsouth'] = KEbcsouth
df['Pbtsouth'] = Pbtsouth; df['Pbcsouth'] = Pbcsouth;
if np.isnan(Pbtwest.sum()):
Pbtwest = np.zeros(Pbtwest.size)
Pbcwest = np.zeros(Pbcwest.size)
if np.isnan(Pbteast.sum()):
Pbteast = np.zeros(Pbteast.size)
Pbceast = np.zeros(Pbceast.size)
if np.isnan(Pbtnorth.sum()):
Pbtnorth = np.zeros(Pbtnorth.size)
Pbcnorth = np.zeros(Pbcnorth.size)
if np.isnan(Pbtsouth.sum()):
Pbtsouth = np.zeros(Pbtsouth.size)
Pbcsouth = np.zeros(Pbcsouth.size)
df['Pbtin'] = Pbtwest - Pbtnorth
df['Pbcin'] = Pbcwest - Pbcnorth
df['Pbtout'] = Pbtsouth - Pbteast
df['Pbcout'] = Pbcsouth - Pbceast
df['Pbt'] = df['Pbtin'] + df['Pbtout']
df['Pbc'] = df['Pbcin'] + df['Pbcout']
if doterms:
df['Pmix'] = Pmix
df['Pmom'] = Pmom
df['Pfriction'] = Pfriction
df.to_csv('savedoutput/power/' + name + '.csv')
# pd.read_csv('savedoutput/power.csv', index_col=0, parse_dates=True)
if __name__ == "__main__":
    # select which set of transects to run
which = 'stripsd50' # 'all' 'center'
doterms = True
if which == 'testtop':
# west transects: [0:1, 2]
twest = np.array([[335, 375, 5]])
# east transects: [0:1, 2]
teast = np.array([[335, 375, 280]])
# north transects: [0, 1:2]
tnorth = np.array([[375,5,280]])
# south transects: [0, 1:2]
tsouth = np.array([[335, 5,280]])
elif which == 'testbottom':
# west transects: [0:1, 2]
twest = np.array([[295, 335, 5]])
# east transects: [0:1, 2]
teast = np.array([[295, 335, 280]])
# north transects: [0, 1:2]
tnorth = np.array([[335,5,280]])
# south transects: [0, 1:2]
tsouth = np.array([[295, 5,280]])
elif which == 'all':
twest = np.array([[0, 444, 0]])
teast = np.array([[0, 444, 296]])
tnorth = np.array([[444, 0, 296]])
tsouth = np.array([[0, 0, 296]])
# choose domain to use, possibly looping. Run function.
if 'strips' not in which:
run(twest, teast, tnorth, tsouth, which)
elif which == 'strips':
for j in range(lon_psi.shape[0]):
name = which + str(j)
fname = 'savedoutput/power/' + name + '.csv'
if os.path.exists(fname):
continue
twest = np.array([[j, j+1, 0]]) # west transects: [0:1, 2]
teast = np.array([[j, j+1, 296]]) # east transects: [0:1, 2]
tnorth = np.array([[j+1, 0, 296]]) # north transects: [0, 1:2]
tsouth = np.array([[j, 0, 296]]) # south transects: [0, 1:2]
run(twest, teast, tnorth, tsouth, name, doterms=doterms)
elif which == 'stripsd10':
dd = 10
for j in range(lon_psi.shape[0])[::dd]:
twest = np.array([[j, j+dd, 0]]) # west transects: [0:1, 2]
teast = np.array([[j, j+dd, 296]]) # east transects: [0:1, 2]
tnorth = np.array([[j+dd, 0, 296]]) # north transects: [0, 1:2]
tsouth = np.array([[j, 0, 296]]) # south transects: [0, 1:2]
run(twest, teast, tnorth, tsouth, which + str(j), doterms=doterms)
elif which == 'stripsd20':
dd = 20
for j in range(lon_psi.shape[0])[::dd]:
twest = np.array([[j, j+dd, 0]]) # west transects: [0:1, 2]
teast = np.array([[j, j+dd, 296]]) # east transects: [0:1, 2]
tnorth = np.array([[j+dd, 0, 296]]) # north transects: [0, 1:2]
tsouth = np.array([[j, 0, 296]]) # south transects: [0, 1:2]
run(twest, teast, tnorth, tsouth, which + str(j), doterms=doterms)
elif which == 'stripsd50':
dd = 50
for j in range(lon_psi.shape[0])[::dd]:
twest = np.array([[j, j+dd, 0]]) # west transects: [0:1, 2]
teast = np.array([[j, j+dd, 296]]) # east transects: [0:1, 2]
tnorth = np.array([[j+dd, 0, 296]]) # north transects: [0, 1:2]
tsouth = np.array([[j, 0, 296]]) # south transects: [0, 1:2]
run(twest, teast, tnorth, tsouth, which + str(j), doterms=doterms)
| [
"[email protected]"
] | |
faf221e81366357c9bf8a59f9df555323d2f696f | ab764ffb00c82ad029f45a0d229cd2025f697985 | /main/migrations/0002_auto__add_field_portrit_user_tutorial_completed__add_field_portrit_use.py | 2edf987e934c7ca1df8807a6a6675fc515424318 | [] | no_license | joneath/portrit | d1ae9b346fca29fd905cca300dfbca8b8a51641e | 529047437d459905abb81618c2c1b85358d09a59 | refs/heads/master | 2020-03-29T20:43:27.487989 | 2011-12-14T06:02:34 | 2011-12-14T06:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,095 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Portrit_User.tutorial_completed'
db.add_column('main_portrit_user', 'tutorial_completed', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'Portrit_User.given_nomination_count'
db.add_column('main_portrit_user', 'given_nomination_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Portrit_User.recieved_nomination_count'
db.add_column('main_portrit_user', 'recieved_nomination_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Portrit_User.selfish_nomination_count'
db.add_column('main_portrit_user', 'selfish_nomination_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Portrit_User.winning_nomination_count'
db.add_column('main_portrit_user', 'winning_nomination_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Portrit_User.invite_count'
db.add_column('main_portrit_user', 'invite_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Portrit_User.vote_count'
db.add_column('main_portrit_user', 'vote_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Portrit_User.comment_count'
db.add_column('main_portrit_user', 'comment_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'Portrit_User.tutorial_completed'
db.delete_column('main_portrit_user', 'tutorial_completed')
# Deleting field 'Portrit_User.given_nomination_count'
db.delete_column('main_portrit_user', 'given_nomination_count')
# Deleting field 'Portrit_User.recieved_nomination_count'
db.delete_column('main_portrit_user', 'recieved_nomination_count')
# Deleting field 'Portrit_User.selfish_nomination_count'
db.delete_column('main_portrit_user', 'selfish_nomination_count')
# Deleting field 'Portrit_User.winning_nomination_count'
db.delete_column('main_portrit_user', 'winning_nomination_count')
# Deleting field 'Portrit_User.invite_count'
db.delete_column('main_portrit_user', 'invite_count')
# Deleting field 'Portrit_User.vote_count'
db.delete_column('main_portrit_user', 'vote_count')
# Deleting field 'Portrit_User.comment_count'
db.delete_column('main_portrit_user', 'comment_count')
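    # With South (the pre-Django-1.7 migration tool used here), this migration is
    # applied with e.g. `python manage.py migrate main` and rolled back by migrating
    # the 'main' app back to the previous migration number.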
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.album': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Album'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fid': ('main.models.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'photos': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Photo']", 'null': 'True', 'blank': 'True'})
},
'main.badge': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Badge'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'associates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.FB_User']", 'null': 'True', 'blank': 'True'}),
'badge_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Badge_Category']", 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nomination': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Nomination']", 'null': 'True', 'blank': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Photo']", 'null': 'True', 'blank': 'True'})
},
'main.badge_category': {
'Meta': {'ordering': "['order']", 'object_name': 'Badge_Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'main.comment': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Comment'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_rel_+'", 'null': 'True', 'to': "orm['main.Comment']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.FB_User']"})
},
'main.fb_user': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'FB_User'},
'active_nominations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Nomination']", 'null': 'True', 'blank': 'True'}),
'albums': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Album']", 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fid': ('main.models.BigIntegerField', [], {'unique': 'True', 'null': 'True'}),
'friends': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'friends_rel_+'", 'null': 'True', 'to': "orm['main.FB_User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'winning_photos': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Photo']", 'null': 'True', 'blank': 'True'})
},
'main.nomination': {
'Meta': {'ordering': "['up_votes', 'down_votes', '-created_date']", 'object_name': 'Nomination'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Comment']", 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_vote_count': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'down_votes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nominatee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nominated_user'", 'null': 'True', 'to': "orm['main.FB_User']"}),
'nomination_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Nomination_Category']", 'null': 'True'}),
'nominator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nominator_user'", 'null': 'True', 'to': "orm['main.FB_User']"}),
'up_votes': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.FB_User']", 'null': 'True', 'blank': 'True'}),
'won': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main.nomination_category': {
'Meta': {'ordering': "['order']", 'object_name': 'Nomination_Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'main.notification': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Notification'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nomination': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Nomination']", 'null': 'True', 'blank': 'True'}),
'notification_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Notification_Type']", 'null': 'True', 'blank': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.FB_User']", 'null': 'True', 'blank': 'True'})
},
'main.notification_type': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Notification_Type'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'main.photo': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Photo'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fb_source_small': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fid': ('main.models.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nominations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Nomination']", 'null': 'True', 'blank': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'small_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'small_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.portrit_user': {
'Meta': {'ordering': "['-created_date']", 'object_name': 'Portrit_User'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'badges': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Badge']", 'null': 'True', 'blank': 'True'}),
'comment_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'portrit_fb_user'", 'to': "orm['main.FB_User']"}),
'given_nomination_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notifications': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.Notification']", 'null': 'True', 'blank': 'True'}),
'recieved_nomination_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'referred_friends': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['main.FB_User']", 'null': 'True', 'blank': 'True'}),
'selfish_nomination_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'vote_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'winning_nomination_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['main']
| [
"[email protected]"
] | |
7f9f7598857398906030166bc1343996c02e03ee | 1121089b07e29cce020acf76190071600b6879bb | /maison.py | 15d2edbf14ce2f9db2ea0aa1f06580911120c85c | [] | no_license | PapaBaGAYE/Python_Turtle_project | 3dc1db7090a3536bfa608fa65463d89dd9da34df | ec59e734e65cce2fec9e859a7e2c27fc3d14b091 | refs/heads/master | 2023-06-19T08:51:50.932433 | 2021-07-24T12:57:41 | 2021-07-24T12:57:41 | 378,640,412 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | import turtle
import dessinMSDA as des
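# dessinMSDA is presumably a local helper module exposing a turtle instance (des.tl)
# and rectangle/carre/triangle drawing helpers that take a size plus pen/fill colours.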
# Draw the filled black rectangle at the bottom
des.tl.begin_fill()
des.rectangle(100, 20, 'black')
des.tl.end_fill()
# Draw the square outline of the house body in black
des.carre(100, 'black')
# Move the pen up and draw the triangle for the roof
des.tl.penup()
des.tl.setpos(-20, 100)
des.tl.pendown()
des.triangle(140, "black")
# Move the pen and draw the small squares for the window
deplacement = 60
for i in range(2):
des.tl.penup()
des.tl.setpos(75, deplacement)
des.tl.pendown()
des.carre(20, "black")
des.tl.penup()
des.tl.setpos(55, deplacement)
des.tl.pendown()
des.carre(20, "black")
deplacement-=20
# Move the pen and draw the rectangle for the door
des.tl.penup()
des.tl.setpos(10, 0)
des.tl.pendown()
des.rectangle(40, 80, "black")
# Draw the white filled rectangle at the bottom of the door
des.tl.penup()
des.tl.setpos(5, 1)
des.tl.pendown()
des.tl.begin_fill()
des.rectangle(50, 18, 'white', "white")
des.tl.end_fill()
# Draw the rectangle on the roof
des.tl.penup()
des.tl.setpos(75, 150)
des.tl.pendown()
des.tl.begin_fill()
des.rectangle(20, 40, 'black', 'white')
des.tl.end_fill()
turtle.done() | [
"[email protected]"
] | |
752ae727504fb4dbb3e6323303b9b031869a2448 | 71a4cfb6747d728dfc97b29366bf8fdfcd790ac3 | /rest_api/tests/unit/test_submit_requests.py | d1e5ac9f44e270c6cc2451a6c6d442e4e15260ab | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | suparnadhar/SuparnaGit | 430c335934ded1649ae8a2dac387fed0c2ec1f5b | bec2704d8b6bc1802523ec26dcb902f59a747a4d | refs/heads/master | 2021-01-25T00:39:33.420153 | 2017-06-16T18:52:02 | 2017-06-16T18:52:02 | 94,685,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,720 | py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import json
from aiohttp.test_utils import unittest_run_loop
from tests.unit.components import Mocks, BaseApiTest
from sawtooth_sdk.protobuf.validator_pb2 import Message
from sawtooth_rest_api.protobuf import client_pb2
class PostBatchTests(BaseApiTest):
async def get_application(self, loop):
self.set_status_and_stream(
Message.CLIENT_BATCH_SUBMIT_REQUEST,
client_pb2.ClientBatchSubmitRequest,
client_pb2.ClientBatchSubmitResponse)
handlers = self.build_handlers(loop, self.stream)
return self.build_app(loop, '/batches', handlers.submit_batches)
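    # Each test below presets a Protobuf response on the mocked validator stream,
    # issues an HTTP request against the handler under test, and then verifies both
    # the Protobuf request that was sent and the JSON body that came back.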
@unittest_run_loop
async def test_post_batch(self):
"""Verifies a POST /batches with one id works properly.
It will receive a Protobuf response with:
- the default status of OK
It should send a Protobuf request with:
- a batches property that matches the batches sent
It should send back a JSON response with:
- a response status of 202
- no data property
- a link property that ends in '/batch_status?id=a'
"""
batches = Mocks.make_batches('a')
self.stream.preset_response()
request = await self.post_batches(batches)
self.stream.assert_valid_request_sent(batches=batches)
self.assertEqual(202, request.status)
response = await request.json()
self.assertNotIn('data', response)
self.assert_has_valid_link(response, '/batch_status?id=a')
@unittest_run_loop
async def test_post_batch_with_validator_error(self):
"""Verifies a POST /batches with a validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
batches = Mocks.make_batches('a')
self.stream.preset_response(self.status.INTERNAL_ERROR)
request = await self.post_batches(batches)
self.assertEqual(500, request.status)
response = await request.json()
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_post_json_batch(self):
"""Verifies a POST /batches with a JSON request body breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 42
"""
request = await self.client.post(
'/batches',
data='{"bad": "data"}',
headers={'content-type': 'application/json'})
self.assertEqual(400, request.status)
response = await request.json()
self.assert_has_valid_error(response, 42)
@unittest_run_loop
async def test_post_invalid_batch(self):
"""Verifies a POST /batches with an invalid batch breaks properly.
It will receive a Protobuf response with:
- a status of INVALID_BATCH
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 30
"""
batches = Mocks.make_batches('bad')
self.stream.preset_response(self.status.INVALID_BATCH)
request = await self.post_batches(batches)
self.assertEqual(400, request.status)
response = await request.json()
self.assert_has_valid_error(response, 30)
@unittest_run_loop
async def test_post_many_batches(self):
"""Verifies a POST /batches with many ids works properly.
It will receive a Protobuf response with:
- the default status of OK
It should send a Protobuf request with:
- a batches property that matches the batches sent
It should send back a JSON response with:
- a response status of 202
- no data property
- a link property that ends in '/batch_status?id=a,b,c'
"""
batches = Mocks.make_batches('a', 'b', 'c')
self.stream.preset_response()
request = await self.post_batches(batches)
self.stream.assert_valid_request_sent(batches=batches)
self.assertEqual(202, request.status)
response = await request.json()
self.assertNotIn('data', response)
self.assert_has_valid_link(response, '/batch_status?id=a,b,c')
@unittest_run_loop
async def test_post_no_batches(self):
"""Verifies a POST /batches with no batches breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 34
"""
request = await self.post_batches([])
self.assertEqual(400, request.status)
response = await request.json()
self.assert_has_valid_error(response, 34)
@unittest_run_loop
async def test_post_batch_with_wait(self):
"""Verifies a POST /batches can wait for commit properly.
It will receive a Protobuf response with:
- batch statuses of {'a': COMMITTED}
It should send a Protobuf request with:
- a batches property that matches the batches sent
- a wait_for_commit property that is True
- a timeout property of 4 (Rest Api default)
It should send back a JSON response with:
- a response status of 201
- no data property
- a link property that ends in '/batches?id=a'
"""
batches = Mocks.make_batches('a')
statuses = {'a': self.status.COMMITTED}
self.stream.preset_response(batch_statuses=statuses)
request = await self.post_batches(batches, wait=True)
self.stream.assert_valid_request_sent(
batches=batches,
wait_for_commit=True,
timeout=4)
self.assertEqual(201, request.status)
response = await request.json()
self.assert_has_valid_link(response, '/batches?id=a')
self.assertNotIn('data', response)
@unittest_run_loop
async def test_post_batch_with_timeout(self):
"""Verifies a POST /batches works when timed out while waiting.
It will receive a Protobuf response with:
- batch statuses of {'pending': PENDING}
It should send a Protobuf request with:
- a batches property that matches the batches sent
- a wait_for_commit property that is True
- a timeout property of 4 (Rest Api default)
It should send back a JSON response with:
            - a response status of 202
            - a link property that ends in '/batch_status?id=pending&wait'
- a data property matching the batch statuses received
"""
batches = Mocks.make_batches('pending')
statuses = {'pending': self.status.PENDING}
self.stream.preset_response(batch_statuses=statuses)
request = await self.post_batches(batches, wait=True)
self.stream.assert_valid_request_sent(
batches=batches,
wait_for_commit=True,
timeout=4)
self.assertEqual(202, request.status)
response = await request.json()
self.assert_has_valid_link(response, '/batch_status?id=pending&wait')
self.assert_statuses_match(statuses, response['data'])
class BatchStatusTests(BaseApiTest):
async def get_application(self, loop):
self.set_status_and_stream(
Message.CLIENT_BATCH_STATUS_REQUEST,
client_pb2.ClientBatchStatusRequest,
client_pb2.ClientBatchStatusResponse)
handlers = self.build_handlers(loop, self.stream)
return self.build_app(loop, '/batch_status', handlers.list_statuses)
@unittest_run_loop
async def test_batch_status_with_one_id(self):
"""Verifies a GET /batch_status with one id works properly.
It will receive a Protobuf response with:
- batch statuses of {'pending': PENDING}
It should send a Protobuf request with:
- a batch_ids property of ['pending']
It should send back a JSON response with:
- a response status of 200
- a link property that ends in '/batch_status?id=pending'
- a data property matching the batch statuses received
"""
statuses = {'pending': self.status.PENDING}
self.stream.preset_response(batch_statuses=statuses)
response = await self.get_assert_200('/batch_status?id=pending')
self.stream.assert_valid_request_sent(batch_ids=['pending'])
self.assert_has_valid_link(response, '/batch_status?id=pending')
self.assert_statuses_match(statuses, response['data'])
@unittest_run_loop
async def test_batch_status_with_validator_error(self):
"""Verifies a GET /batch_status with a validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
self.stream.preset_response(self.status.INTERNAL_ERROR)
response = await self.get_assert_status('/batch_status?id=pending', 500)
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_batch_status_with_missing_statuses(self):
"""Verifies a GET /batch_status with no statuses breaks properly.
It will receive a Protobuf response with:
- a status of NO_RESOURCE
It should send back a JSON response with:
- a status of 500
- an error property with a code of 27
"""
self.stream.preset_response(self.status.NO_RESOURCE)
response = await self.get_assert_status('/batch_status?id=pending', 500)
self.assert_has_valid_error(response, 27)
@unittest_run_loop
async def test_batch_status_with_wait(self):
"""Verifies a GET /batch_status with a wait set works properly.
It will receive a Protobuf response with:
- batch statuses of {'pending': COMMITTED}
It should send a Protobuf request with:
- a batch_ids property of ['pending']
- a wait_for_commit property that is True
- a timeout property of 4 (Rest Api default)
It should send back a JSON response with:
- a response status of 200
- a link property that ends in '/batch_status?id=pending&wait'
- a data property matching the batch statuses received
"""
statuses = {'pending': self.status.COMMITTED}
self.stream.preset_response(batch_statuses=statuses)
response = await self.get_assert_200('/batch_status?id=pending&wait')
self.stream.assert_valid_request_sent(
batch_ids=['pending'],
wait_for_commit=True,
timeout=4)
self.assert_has_valid_link(response, '/batch_status?id=pending&wait')
self.assert_statuses_match(statuses, response['data'])
@unittest_run_loop
async def test_batch_status_with_many_ids(self):
"""Verifies a GET /batch_status with many ids works properly.
It will receive a Protobuf response with:
- batch statuses of:
* 'committed': COMMITTED
* 'unknown': UNKNOWN
* 'bad': UNKNOWN
It should send a Protobuf request with:
- a batch_ids property of ['committed', 'unknown', 'bad']
It should send back a JSON response with:
- a response status of 200
- link property ending in '/batch_status?id=committed,unknown,bad'
- a data property matching the batch statuses received
"""
statuses = {
'committed': self.status.COMMITTED,
'unknown': self.status.UNKNOWN,
'bad': self.status.UNKNOWN}
self.stream.preset_response(batch_statuses=statuses)
response = await self.get_assert_200(
'/batch_status?id=committed,unknown,bad')
self.stream.assert_valid_request_sent(
batch_ids=['committed', 'unknown', 'bad'])
self.assert_has_valid_link(
response,
'/batch_status?id=committed,unknown,bad')
self.assert_statuses_match(statuses, response['data'])
@unittest_run_loop
async def test_batch_status_with_no_id(self):
"""Verifies a GET /batch_status with no id breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 66
"""
response = await self.get_assert_status('/batch_status', 400)
self.assert_has_valid_error(response, 66)
@unittest_run_loop
async def test_batch_status_as_post(self):
"""Verifies a POST to /batch_status works properly.
It will receive a Protobuf response with:
- batch statuses of:
* 'committed': COMMITTED
* 'pending': PENDING
* 'bad': UNKNOWN
It should send a Protobuf request with:
- a batch_ids property of ['committed', 'pending', 'bad']
It should send back a JSON response with:
- a response status of 200
- an empty link property
- a data property matching the batch statuses received
"""
statuses = {
'committed': self.status.COMMITTED,
'pending': self.status.PENDING,
'bad': self.status.UNKNOWN}
self.stream.preset_response(batch_statuses=statuses)
request = await self.client.post(
'/batch_status',
data=json.dumps(['committed', 'pending', 'bad']).encode(),
headers={'content-type': 'application/json'})
self.stream.assert_valid_request_sent(
batch_ids=['committed', 'pending', 'bad'])
self.assertEqual(200, request.status)
response = await request.json()
self.assertNotIn('link', response)
self.assert_statuses_match(statuses, response['data'])
@unittest_run_loop
async def test_batch_status_wrong_post_type(self):
"""Verifies a bad POST to /batch_status breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 43
"""
request = await self.client.post(
'/batch_status',
data=json.dumps(['a', 'b', 'c']).encode(),
headers={'content-type': 'application/octet-stream'})
self.assertEqual(400, request.status)
response = await request.json()
self.assert_has_valid_error(response, 43)
@unittest_run_loop
async def test_batch_status_as_bad_post(self):
"""Verifies an empty POST to /batch_status breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 46
"""
request = await self.client.post(
'/batch_status',
data=json.dumps('bad body').encode(),
headers={'content-type': 'application/json'})
self.assertEqual(400, request.status)
response = await request.json()
self.assert_has_valid_error(response, 46)
@unittest_run_loop
async def test_batch_status_as_empty_post(self):
"""Verifies an empty POST to /batch_status breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 46
"""
request = await self.client.post(
'/batch_status',
data=json.dumps([]).encode(),
headers={'content-type': 'application/json'})
self.assertEqual(400, request.status)
response = await request.json()
self.assert_has_valid_error(response, 46)
| [
"[email protected]"
] | |
9d036bb0095c09ecccfc0ff1a1f322a58a874a5e | d4f5c7629c992fcb5b73f5bbdee480a7dcf21701 | /bot_deriv_btc.py | 62b85b87db063c15f6130efc919b7dd73b2d4fe1 | [] | no_license | xkayo29/deriv_b3_btc | d827e218b9244ae81532bfeb009386db894a7c6c | 19244c27bb6c43c03c3243c12591b61397e013b0 | refs/heads/main | 2023-04-25T00:44:54.813517 | 2021-05-07T16:44:44 | 2021-05-07T16:44:44 | 365,296,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,781 | py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import MetaTrader5 as mt5 # pip install MetaTrader5
import pandas as pd
import numpy as np
import telebot # pip install pyTelegramBotAPI
from datetime import datetime, time
import time as tm
# %%
# Function to initialize MT5 and log in to the account
def login(login, password, servidor):
mt5.initialize()
lg = mt5.login(login, password, servidor)
if lg:
print(f'Conectado com sucesso...')
else:
print(f'Erro na conexção com a conta, erro = {mt5.last_error()}')
mt5.shutdown()
# %%
# Function that checks for an entry opportunity after the moving averages cross (trend change), used to trigger the Telegram message
def cruzamento(tendencia):
if tendencia[-5] == 'Alta' and tendencia[-2] == 'Baixa':
return 'Vendido'
elif tendencia[-5] == 'Baixa' and tendencia[-2] == 'Alta':
return 'Comprado'
else:
return False
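# Illustrative example: if the trend column ends in
# [..., 'Alta', 'Alta', 'Alta', 'Baixa', 'Baixa'], then tendencia[-5] == 'Alta' and
# tendencia[-2] == 'Baixa', so cruzamento() returns 'Vendido' (a short-entry signal).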
# %%
# Function that gets the computer's current time and compares it with the trading hours
def iniciar():
    # Convert the PC's current time to a string
    atual = datetime.now().time().strftime('%H:%M:%S')
    # Convert the string to a time object for comparison
    atual = datetime.strptime(atual, '%H:%M:%S').time()
    # Create the market opening and closing time variables
    inicio, fim = time(0, 0, 0), time(23, 59, 59)
if atual > inicio and atual < fim:
return True
else:
return False
# %%
# Variables for the Telegram bot
# '1608254599:AAHK6CdSPPqDIFTLemsZXMtogGO4B8pkCic'
token = '1576500645:AAG3FBifGkzsutPixGbrxnVSPMV9RaqPDUI'
chat_id = '-1001108876977' # 955453236
# Function to send a message through the Telegram bot
def bot_telegram(msg, token, chat_id):
tb = telebot.TeleBot(token)
tb.send_message(chat_id, msg)
return f'Mensagem enviada'
# %%
# Call the login function
login(1570672, '90576edkF', 'Deriv-Server')
# %%
# List of symbols for the for loop
papeis = ['EURUSD', 'USDJPY', 'EURJPY', 'BCHUSD', 'BNBUSD', 'BTCETH', 'BTCLTC', 'BTCUSD', 'DSHUSD', 'EOSUSD', 'ETHUSD',
'IOTUSD', 'LTCUSD', 'NEOUSD', 'OMGUSD', 'TRXUSD', 'XLMUSD', 'XMRUSD', 'XRPUSD', 'ZECUSD']
# Tracks symbols already notified so that only one notification is sent per signal
id_s = []
# %%
while True:
    while iniciar(): # Check that the trading-hours function returns True before starting the analysis
        # Loop over all the symbols
        for papel in papeis:
                # Download the symbol's price data
                rates = mt5.copy_rates_from_pos(papel, mt5.TIMEFRAME_M5, 0, 300)
                # Convert the data into a DataFrame
                df_rates = pd.DataFrame(rates)
                # Convert the time column from seconds to datetime
                df_rates['time'] = pd.to_datetime(df_rates['time'], unit='s')
# %%
                # Rename the existing columns
df_rates.rename(columns={'time': 'Data', 'open': 'Abertura', 'high': 'Alta', 'low': 'Baixa',
'close': 'Fechamento', 'tick_volume': 'T_volume', 'spread': 'Spread', 'real_volume': 'Volume'}, inplace=True)
                # Set the Data column as the index
df_rates.set_index('Data', inplace=True)
# %%
                # Create the slow and fast moving averages
df_rates['Lenta'] = df_rates['Fechamento'].rolling(200).mean()
df_rates['Rapida'] = df_rates['Fechamento'].rolling(100).mean()
                # Drop rows with NaN values
df_rates.dropna(inplace=True)
# %%
                # Build the trend conditions for the new column
condicao = [
(df_rates['Lenta'] > df_rates['Rapida']),
(df_rates['Lenta'] < df_rates['Rapida']),
(df_rates['Lenta'] == df_rates['Rapida'])
]
valor = [
'Baixa', 'Alta', 'Centralizado'
]
                # Create the trend column
df_rates['Tendencia'] = np.select(condicao, valor)
# %%
                # Look for an entry signal
if cruzamento(df_rates['Tendencia']) == 'Vendido':
if papel not in id_s:
try:
msg = f'{papel} - Entrar vendindo | Preço: {df_rates["Fechamento"].tail(1).values}'
bot_telegram(msg, token=token, chat_id=chat_id)
id_s.append(papel)
print(
f'\n*****{papel}*****\n{df_rates.tail(1)}')
except:
pass
# print(
# f'Erro ao envia via telegram\n{papel} - Entrar vendindo | #Preço: {df_rates["Fechamento"].tail(1).values}')
else:
pass
elif cruzamento(df_rates['Tendencia']) == 'Comprado':
if papel not in id_s:
try:
msg = f'{papel} - Entrar comprado | Preço: {df_rates["Fechamento"].tail(1).values}'
bot_telegram(msg, token=token, chat_id=chat_id)
id_s.append(papel)
print(
f'\n*****{papel}*****\n{df_rates.tail(1)}')
except:
pass
# print(
# f'Erro ao envia via telegram\n{papel} - Entrar comprado | #Preço: {df_rates["Fechamento"].tail(1).values}')
else:
pass
else:
if papel in id_s:
id_s.remove(papel)
# print(
# f'\n*****{papel}*****\n{df_rates["Fechamento"].tail(1).values}')
# tm.sleep(1)
# %%
else:
print(f'Fora do horario de negociação')
break
| [
"[email protected]"
] | |
ab1cd998831a83dfff01278c0eb0f2a0825fb1cb | 14a2a310a0b1022e12858bed60e5b0235c6d2cc4 | /demo/oop/concatenate.py | 773ba6984fec5ddadfaed4071e134e85916f38b6 | [] | no_license | srikanthpragada/python_18_jan_2021 | 0f32b8ea05b5a7ebfc747c86161543711225bfa5 | 5197176e39b4f72a486d8bbe4f108ca783dd51ea | refs/heads/master | 2023-03-13T14:25:20.046629 | 2021-03-01T14:18:14 | 2021-03-01T14:18:14 | 331,602,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | def cat(st1, st2):
    if isinstance(st2, str):
return st1 + st2
else:
return st1 + str(st2)
print(cat("abc", "xyz"))
print(cat("abc", 10))
| [
"[email protected]"
] | |
f72b7398402b93378a713bc1e809276281b5c7d3 | b631ee6ebf35f9c09bb0f288e3be3eb3c9667504 | /src/pedidos/apps.py | 3389169b43c7bc12b0841a54c0a5a8f7f9b27068 | [] | no_license | loristron/SimuladorDePedidos | 39145602f87944217e55747c8acd64f98144627a | 81c99c6c1ff9bc3a1df258176fcf54d722ffff50 | refs/heads/master | 2023-03-16T13:56:44.823338 | 2021-03-07T21:10:09 | 2021-03-07T21:10:09 | 345,128,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.apps import AppConfig
class PedidosConfig(AppConfig):
name = 'pedidos'
def ready(self):
import pedidos.signals
| [
"[email protected]"
] | |
5d277728e53b34fd3d324366e0405a9c9147368d | b73eab67f1ec87b91160a59b1f35db226bbaf4a7 | /half_analytical_const_lensz.py | c193bb92e5549ba15d5b6bf4ea50b9c92c915d3d | [] | no_license | lingxz/lightbending | 35833a7d99afbb62bc2dc8ffdf95e9d04ff8d46f | b27f8a1ea9733b7d79b0794a78416f14179845db | refs/heads/master | 2022-05-16T20:47:21.233309 | 2018-05-07T03:58:29 | 2018-05-07T03:58:29 | 106,876,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,514 | py | '''
origin at lens
'''
# diff_lambdas_small2: latest, diff z_lens, 100 per Omega_Lambda, but wrong DL
# diff_lambdas_small: same z_lens, diff z_source, 100 per Omega_Lambda
# diff_lambdas_small3: same as diff_lambdas_small2, but with correct DL
# diff_lambdas_small4: same as previous, but the step size in kottler solver is changed to agree with dt. Previously was fixed at 5e-7, oops!!!
# diff_lambdas_bigger_redshifts: bigger redshifts, 0.2 to 1 instead of 0.05 to 0.2.
# diff_lambdas_bigger_redshifts2: same as above, just more points
import numpy as np
import scipy.integrate as spi
import matplotlib.pyplot as plt
import time
import pandas as pd
length_scale = 3.086e22 # mega parsec
INTEGRATOR = 'vode'
INTEGRATOR_PARAMS = {
'atol': 1e-110,
# 'atol': 0,
# 'rtol': 0,
'rtol': 1e-15,
'nsteps': 100000000,
'method': 'bdf',
}
# H_0 = 7.33e-27
H_0 = 7.56e-27 * length_scale
# Omega_Lambda = 0
# Omega_m = 1 - Omega_Lambda
# M = 0.5e15 / length_scale
M = 1474e12 / length_scale
tdot_to_steps_ratio = None
print("M: ", M)
# def frw(eta, w, p):
# L, k, Omega_Lambda, Omega_m, H_0 = p
# a, r, rdot, t, phi = w
# a_t = a * H_0 * np.sqrt(Omega_m/a**3 + Omega_Lambda)
# phidot = L / (a*r)**2
# # tddot = -a*r**2*phidot**2*a_t - a*rdot**2*a_t/ (1 - k*r**2)
# tdot = -np.sqrt(a**2*rdot**2+a**2*r**2*phidot**2)
# rddot = r*phidot**2 - k*rdot**2 - 2*a_t/a*rdot*tdot
# return [
# a_t*tdot,
# rdot,
# rddot,
# tdot,
# phidot,
# ]
def omega_lambda2lambda(Omega_Lambda):
return 3*Omega_Lambda*H_0**2
def kantowski_alpha(R, phi, Omega_Lambda):
r0 = 1/(1/R + M/R**2 + 3/16*M**2/R**3)
# r0 = R*(1-M/R - 3/2*(M/R)**2 - 4*(M/R)**3)
Lambda = omega_lambda2lambda(Omega_Lambda)
rs = 2*M
first_term = (rs/2/r0)*np.cos(phi)*(-4*(np.cos(phi))**2 - 12*np.cos(phi)*np.sin(phi)*np.sqrt(Lambda*r0**2/3+rs/r0*(np.sin(phi))**3) + Lambda*r0**2*(8/3-20/3*(np.sin(phi))**2))
second_term = (rs/2/r0)**2*(15/4*(2*phi-np.pi) + np.cos(phi)*(4+33/2*np.sin(phi)-4*(np.sin(phi))**2+19*(np.sin(phi))**3-64*(np.sin(phi))**5) - 12*np.log(np.tan(phi/2))*(np.sin(phi))**3)
return first_term + second_term
def get_angle(r, phi, rdot, phidot):
res = np.arctan((rdot*np.sin(phi)+r*np.cos(phi)*phidot)/(rdot*np.cos(phi)-r*np.sin(phi)*phidot))
return res
def kottler(eta, w, p):
E, L, M, Omega_Lambda, Omega_m, H_0 = p
r_h, t, r, rdot, phi = w
Lambda = 3*Omega_Lambda*H_0**2
# Lambda = 0
f = 1 - 2*M/r - Lambda/3*r**2
rddot = L**2 * (r - 3*M) / r**4
tdot = E / f
phidot = L/r**2
r_h_t = (1 - 2*M/r_h - Lambda/3*r_h**2) * np.sqrt(2*M/r_h + Lambda/3*r_h**2)
# r_h_t = (1 - 2*M/r_h) * np.sqrt(2*M/r_h)
return [
r_h_t*tdot,
tdot,
rdot,
rddot,
phidot,
]
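# The system above is a null geodesic of the Kottler (Schwarzschild-de Sitter) metric
# with f(r) = 1 - 2*M/r - Lambda/3*r**2, using the conserved quantities E = f*tdot and
# L = r**2*phidot; r_h tracks the hole boundary, which expands with the FRW background.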
def cart2spherical(x):
return np.array([
np.sqrt(x[0]**2 + x[1]**2),
np.arctan(x[1]/x[0])
])
def conformal_time(a, Omega_m, Omega_Lambda):
def tmp(a1):
return 1/a1**2/np.sqrt(Omega_m/a1**3 + Omega_Lambda)
result, err = spi.quad(tmp, 1, a, epsrel=1e-8, epsabs=1e-8)
return result/H_0
def binary_search(start, end, answer, Omega_m, Omega_Lambda):
mid = (end+start)/2
res = conformal_time(mid, Omega_m, Omega_Lambda)
# print(mid, res, answer)
if np.isclose(res, answer, rtol=1e-10, atol=0):
return mid
if res < answer:
return binary_search(mid, end, answer, Omega_m, Omega_Lambda)
else:
return binary_search(start, mid, answer, Omega_m, Omega_Lambda)
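# binary_search() inverts conformal_time(): it bisects on the scale factor a until the
# conformal time back from a = 1 matches `answer`; solve() uses it to recover the scale
# factor at which the light ray exits the hole.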
def solve(angle_to_horizontal, comoving_lens=None, plot=True, Omega_Lambda=0, dt=None):
k = 0
a0 = 1
initial_a = a0
# initial_r = 10e17
initial_r = comoving_lens
initial_phi = np.pi
initial_t = 0.
Omega_m = 1 - Omega_Lambda
initial_rdot = -initial_r
initial_phidot = np.tan(angle_to_horizontal) * initial_rdot / initial_r
initial_R = initial_a*initial_r
initial_tdot = -np.sqrt(initial_a**2/(1-k*initial_r**2)*initial_rdot**2 + initial_R**2*initial_phidot**2)
if plot:
print("initial velocities:", initial_rdot, initial_phidot, initial_tdot)
rho = Omega_m*3*H_0**2/(8*np.pi)
r_h = 1/initial_a*(3*M/(4*np.pi*rho))**(1./3)
if r_h > initial_r:
print("Starting point is inside the hole! Make the hole smaller or bring the lens further away.")
return
if plot:
print('r_h:', r_h, "\n")
Lambda = 3*Omega_Lambda*H_0**2
L_frw = (initial_a*initial_r)**2*initial_phidot
p_frw = [L_frw, k, Omega_Lambda, Omega_m, H_0]
initial = [initial_a, initial_r, initial_rdot, initial_t, initial_phi]
# save for later frw straight line propagation
initial0 = initial
x_initial = np.array([0, 0])
velocity = np.sqrt((initial_rdot*np.cos(initial_phi)-initial_r*np.sin(initial_phi)*initial_phidot)**2 + (initial_rdot*np.sin(initial_phi)+initial_r*np.cos(initial_phi)*initial_phidot)**2)
direction = np.array([
initial_rdot*np.cos(initial_phi)-initial_r*np.sin(initial_phi)*initial_phidot,
initial_rdot*np.sin(initial_phi)+initial_r*np.cos(initial_phi)*initial_phidot,
])
direction = direction / velocity
# direction = np.array([
# velocity*np.cos(angle_to_horizontal),
# velocity*np.sin(angle_to_horizontal),
# ])
delta_eta = np.roots([direction[0]**2+direction[1]**2, -2*direction[0]*comoving_lens, comoving_lens**2-r_h**2])
if plot:
print("delta_eta", delta_eta)
delta_eta = sorted(delta_eta[delta_eta > 0])[0]
eta_initial = 0
eta_out = eta_initial - delta_eta
if plot:
print("eta_out", eta_out, delta_eta)
x_final = x_initial + (eta_initial - eta_out)*direction
x_final[0] -= comoving_lens
r_final, phi_final = cart2spherical(x_final)
phi_final = np.pi + phi_final # change from 4th to second quadrant
# print("conformal_time", conformal_time(0.9997899999999995, Omega_m, Omega_Lambda))
a_final = binary_search(0.5, 1, eta_out, Omega_m, Omega_Lambda)
phidot_final = L_frw/(a_final*r_final)**2
rdot_final = (r_final*np.cos(phi_final)*phidot_final+r_final*np.sin(phi_final)*phidot_final*np.tan(angle_to_horizontal))/(np.cos(phi_final)*np.tan(angle_to_horizontal)-np.sin(phi_final))
# rdot_final = r_final*phidot_final/np.tan(angle_to_horizontal)
tdot_final = -np.sqrt(a_final**2*rdot_final**2 + a_final**2*r_final**2*phidot_final**2)
t_final = 0 # not important
if plot:
print("phi_final", phi_final)
print("a_final", a_final)
last = [a_final, r_final, rdot_final, t_final, phi_final]
# res = np.arctan((rdot*np.sin(phi)+r*np.cos(phi)*phidot)/(rdot*np.cos(phi)-r*np.sin(phi)*phidot))
frw_angle_before_entering = get_angle(last[1], last[4], last[2], L_frw/(last[0]*last[1])**2)
if plot:
print("angle_to_horizontal", angle_to_horizontal)
print("Angle in FRW::")
print(frw_angle_before_entering)
print(last)
print("\n")
solver_kottler = spi.ode(kottler).set_integrator(INTEGRATOR, **INTEGRATOR_PARAMS)
# a, r, rdot, t, phi = w
r_out = last[0] * last[1]
exit_rh = r_out
tdot_out = -np.sqrt(last[0]**2*last[2]**2+last[0]**2*last[1]**2*L_frw/(last[0]*last[1])**2)
initial_t = 0
initial_r = r_out
initial_phi = last[4]
initial_rh = initial_r
f = 1 - 2*M/r_out - Lambda/3*r_out**2
etadot = tdot_out / last[0] # conformal time
# a, r, rdot, t, phi
initial_rdot = last[0] * (np.sqrt(1 - f)*etadot + last[2])
initial_tdot = last[0]/f*(etadot + np.sqrt(1-f)*last[2])
initial_phidot = L_frw / (last[0]*last[1])**2
L_kottler = initial_phidot *initial_r**2
initial_kottler = [initial_rh, initial_t, initial_r, initial_rdot, initial_phi]
E = f*initial_tdot
p_kottler = [E, L_kottler, M, Omega_Lambda, Omega_m, H_0]
# global tdot_to_steps_ratio
# if not tdot_to_steps_ratio:
# tdot_to_steps_ratio = initial_tdot/dt
# else:
# dt = initial_tdot / tdot_to_steps_ratio
# print("tdot_to_steps_ratio", tdot_to_steps_ratio, dt)
# print()
solver_kottler.set_initial_value(initial_kottler, 0).set_f_params(p_kottler)
if plot:
print("kottler initial:")
print("initial_rh, initial_t, initial_r, initial_rdot, initial_phi")
print(initial_kottler)
print("initial_tdot", initial_tdot)
angle_before_entering = get_angle(initial_r, initial_phi, initial_rdot, initial_phidot)
if plot:
print("light ray angle before entering kottler hole: ", angle_before_entering)
print("initial conditions on entering kottler hole: ", initial_kottler)
print("initial params on entering kottler hole: ", p_kottler)
first_time = True
prev_r = np.inf
while solver_kottler.successful():
solver_kottler.integrate(solver_kottler.t + dt, step=False)
# sol_kottler.append(list(solver_kottler.y))
# if solver_kottler.y[2] > prev_r and first_time:
# if plot:
# print("turning point in kottler metric:", solver_kottler.y[2])
# first_time = False
# else:
# prev_r = solver_kottler.y[2]
if solver_kottler.y[4] < np.pi/2 and first_time:
if plot:
print("turning point in kottler metric:", solver_kottler.y[2])
first_time = False
# if solver_kottler.y[4] < np.pi/2 and np.isclose(solver_kottler.y[2], solver_kottler.y[0], rtol=1e-5):
# last = solver_kottler.y
# print("here??")
# break
if solver_kottler.y[2] > solver_kottler.y[0]:
# print("exit hole at", solver_kottler.y[2], solver_kottler.y[0], solver_kottler.y[2]-solver_kottler.y[0])
last = solver_kottler.y
break
angle_after_exiting = get_angle(last[2], last[4], last[3], L_kottler/last[2]**2)
if plot:
print("light ray angle after exiting kottler hole: ", angle_after_exiting)
print("bending angle in kottler: ", angle_before_entering - angle_after_exiting)
print("\n")
if plot:
print("time in hole: ", last[1])
# r_h, t, r, rdot, phi = w
exit_phi = last[4]
initial_phi = last[4]
initial_r = r_h
initial_a = last[2] / r_h
if plot:
print("scale factor on exit:", initial_a)
print("exit r in FRW:", initial_r)
initial_phidot = L_kottler / last[2]**2
f = 1-2*M/last[2]-Lambda/3*last[2]**2
last_tdot = E/f
initial_rdot = 1/initial_a*(1/f*last[3] - np.sqrt(1-f)*last_tdot)
initial_etadot = 1/initial_a*(last_tdot - np.sqrt(1-f)/f*last[3])
initial_tdot = initial_etadot * initial_a
p_frw[0] = initial_a**2 * initial_r**2*initial_phidot # change the L_frw
# p_frw = [L_frw, k, Omega_Lambda, Omega_m, H_0]
# r_h, t, r, rdot, phi = w
initial_t = 0
# a, t, r, rdot, phi
frw_angle_after_exiting = get_angle(initial_r, initial_phi, initial_rdot, initial_phidot)
if plot:
print("Angle after exiting FRW:", frw_angle_after_exiting)
print("bending angle in FRW: ", frw_angle_before_entering - frw_angle_after_exiting)
print("\n")
# check if its going to cross the axis
initial_ydot = initial_rdot*np.sin(initial_phi) + initial_r*np.cos(initial_phi)*initial_phidot
if initial_ydot > 0:
print("light ray is not going to cross the axis, decrease angle_to_horizontal")
print("initial angle to horizontal: ", angle_to_horizontal)
print("----")
if initial_r*np.sin(initial_phi) < 0:
print("light ray is bent too much by the hole, increase angle_to_horizontal")
print("initial angle to horizontal: ", angle_to_horizontal)
print("----")
alpha = frw_angle_before_entering - frw_angle_after_exiting
ans = initial_r*np.sin(initial_phi)/np.tan(np.abs(frw_angle_after_exiting)) + initial_r*np.cos(initial_phi)
return ans, exit_rh, exit_phi, alpha
# return r[-1], source_a
# [ 0.18075975 0.05122652 0.23824122 0.31020329 0.20010044 0.39768539
# 0.43731914 0.46608477 0.37572935 0.39980157]
def get_distances(z, Omega_Lambda=0):
    """Return (comoving, angular-diameter) distances to redshift z for a flat universe:
    comoving = (1/H_0) * integral_0^z dz'/sqrt(Om*(1+z')**3 + OL), dang = comoving/(1+z)."""
Omega_m = 1 - Omega_Lambda
def integrand(z):
return 1/np.sqrt(Omega_m*(1+z)**3 + Omega_Lambda)
integral, error = spi.quad(integrand, 0, z, epsrel=1e-8, epsabs=1e-8)
comoving = integral/H_0
dang = comoving/(1+z)
return comoving, dang
def calc_theta(D_LS, D_L, D_S):
return np.sqrt(4*M*D_LS/D_L/D_S)
def theta2rls_flat(theta, z_lens):
rl, dl = get_distances(z_lens, Omega_Lambda=0)
return 4*calculate_mass(0)*rl/(4*calculate_mass(0)-dl*theta**2)
def rs2theta(rs, rl, dl):
return np.sqrt(4*M*(rs-rl)/rs/dl)
def rs2redshift_flat(rs):
return 1/(1-H_0*rs/2)**2 -1
def calculate_mass(om):
rho = (1-om)*3*H_0**2/(8*np.pi)
return 4/3*np.pi*rho*r_h**3
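# --- illustrative sketch (added for clarity; not called anywhere) --------------
# The helpers above combine as follows: get_distances() integrates 1/H(z) to give
# the comoving distance and the angular-diameter distance d_A = comoving/(1+z),
# and calc_theta() gives the Einstein angle theta_E = sqrt(4*M*D_LS/(D_L*D_S)).
# The function below simply wires them together for a flat universe, where
# D_LS = (comoving_S - comoving_L)/(1+z_S).  It assumes M and H_0 are defined at
# module level, as they are elsewhere in this script.
def einstein_angle_example(z_lens, z_source, Omega_Lambda=0):
    com_l, dang_l = get_distances(z_lens, Omega_Lambda=Omega_Lambda)
    com_s, dang_s = get_distances(z_source, Omega_Lambda=Omega_Lambda)
    dang_ls = (com_s - com_l) / (1 + z_source)  # flat-universe lens-source distance
    return calc_theta(dang_ls, dang_l, dang_s)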
from tqdm import tqdm
def main():
start = time.time()
om_lambdas = np.linspace(0.9, 0.99, 1)
# om_lambdas = np.array([0.99])
start_ms = np.linspace(1474e12/length_scale, 5*1474e12/length_scale, 1)
# start_ms = np.linspace(8*1474e13/length_scale, 5*1474e15/length_scale, 1)
z_lens = 1.
start_thetas = np.array([2e-6]*50)
# start_thetas = np.array([2.18e-4]*50)
step_size = 1e-9
first = True
filename = 'data/half_analytical_const_lensz_throwaway.csv'
print(filename)
for theta, current_m in tqdm(list(zip(start_thetas, start_ms))):
global M
M = current_m
rs = []
thetas = []
dl = []
ms = []
raw_rs = []
com_lens = []
exit_rhs = []
enter_phis = []
alphas = []
for om in tqdm(om_lambdas):
ms.append(current_m)
comoving_lens, dang_lens = get_distances(z_lens, Omega_Lambda=om)
# source_r, dang_r = get_distances(source_z, Omega_Lambda=om)
# theta = rs2theta(source_r, comoving_lens, dang_lens)
# print("lens r", comoving_lens, theta)
com_lens.append(comoving_lens)
r, exit_rh, enter_phi, alpha = solve(theta, plot=True, comoving_lens=comoving_lens, Omega_Lambda=om, dt=step_size)
exit_rhs.append(exit_rh)
enter_phis.append(enter_phi)
alphas.append(alpha)
R = dang_lens*theta
A_frw = 4*M/R + 15*np.pi*M**2/4/R**2 + 401/12*M**3/R**3
frw = comoving_lens/(A_frw/theta -1)
print("enter_phi", enter_phi)
print("R", dang_lens*theta)
print("compare", r, frw, r/frw-1)
print("A_frw", A_frw)
k_alpha = kantowski_alpha(dang_lens*theta, enter_phi, om)
print("k_alpha", k_alpha)
print("difference", -alpha/k_alpha-1)
r0 = 1/(1/R + M/R**2 + 3/16*M**2/R**3)
extra_term = (2*M/r0 + omega_lambda2lambda(om)*r0**2)**(5/2) / (4*M/R*(np.cos(enter_phi))**3)
print("extra term ratio", extra_term)
thetas.append(theta)
raw_rs.append(r)
rs.append(r+comoving_lens)
dl.append(dang_lens)
rs = np.array(rs)
thetas = np.array(thetas)
dl = np.array(dl)
ms = np.array(ms)
raw_rs = np.array(raw_rs)
com_lens = np.array(com_lens)
exit_rhs = np.array(exit_rhs)
enter_phis = np.array(enter_phis)
alphas = np.array(alphas)
# DL,DLS,DS,M,numerical_thetas,om_lambdas,rs,rs_initial,step,theta,z_lens,comoving_lens,raw_rs
df = pd.DataFrame({
'DL': dl,
'M': ms,
'numerical_thetas': thetas,
'om_lambdas': om_lambdas,
'rs': rs,
# 'rs_initial': source_rs_array,
'step': [step_size]*len(thetas),
'theta': thetas,
'z_lens': [z_lens]*len(thetas),
'comoving_lens': com_lens,
'raw_rs': raw_rs,
'exit_rhs': exit_rhs,
'enter_phis': enter_phis,
'alphas': alphas,
})
# df = df[['DL','DLS','DS','M','numerical_thetas','om_lambdas','rs','rs_initial','step','theta','z_lens','comoving_lens','raw_rs']]
if first:
df.to_csv(filename, index=False)
first = False
else:
df.to_csv(filename, index=False, header=False, mode='a')
print("Time taken: {}".format(time.time() - start))
main()
# main2()
# plt.show()
# plt.savefig('images/lambda.png') | [
"[email protected]"
] | |
e76ce8a2f472e3b1aa0ab5ddaf95486255b9ea6d | 89841a2b522b7b1ab7965560f62b4b401b2d0a4d | /gravity physics/gravity simulation.py | 105b78ee8eb670cd9124ca39718d4366f32d17f6 | [] | no_license | sidd5sci/python-basics | 14d621d52d3219943e2b0136c610dd769cc36a29 | fea620141292cb6beee782cddb5a7d4eeb067e9a | refs/heads/master | 2021-01-20T00:22:29.496330 | 2017-04-22T17:03:00 | 2017-04-22T17:03:00 | 89,123,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py | import random
import math
from Tkinter import *
import time
PI = 3.14159
g = 9.87 #m/s2
ctime = 0.0
class vector:
def __init__(self):
self.direction = 0.0
self.magnitude = 0.0
def asign(self,drct,mag,angletype):
if(angletype == 'R'):
self.direction = drct
elif(angletype == 'D'):
self.direction = (PI/180)*drct
self.magnitude = mag
def getDirection(self):
return self.direction*(180/PI)
def getMagnitude(self):
return self.magnitude
class vertex:
def __init__(self):
self.x = 0.0
self.y = 0.0
self.z = 0.0
positionVector = vector()
def asign(self,X,Y,Z):
self.x = X
self.y = Y
self.z = Z
def random(self,irange,frange):
self.x = random.uniform(irange,frange)
self.y = random.uniform(irange,frange)
self.z = random.uniform(irange,frange)
class edge :
def __init__(self):
self.initVertex = vertex()
self.finalVertex = vertex()
def asignVertex(self,vi= vertex(),vf= vertex()):
self.initVertex = vi
self.finalVertex = vf
def asignVertex(self,xi,yi,zi,xf,yf,zf):
self.initVertex.x = xi
self.initVertex.y = yi
self.initVertex.z = zi
self.finalVertex.x = xf
self.finalVertex.y = yf
self.finalVertex.z = zf
WIDTH =900
HEIGHT = 400
root = Tk()
root.title("Trapped Fly!")
canvas = Canvas(root,width= WIDTH,height=HEIGHT)
canvas.pack()
#for i in range(0,100):
def drawEquiTriangle(color, polygonCentre, z_axisRotation, mag):
    """Draw an equilateral triangle centred on polygonCentre: the three vertices are
    placed 120 degrees apart at distance mag, rotated by z_axisRotation degrees."""
v1,v2,v3 = vector(),vector(),vector()
v1.asign(90+z_axisRotation,mag,"D")
v2.asign(90+120+z_axisRotation,mag,"D")
v3.asign(90+240+z_axisRotation,mag,"D")
ver1,ver2 = vertex(),vertex()
ver1.x = polygonCentre.x + math.cos(v1.direction)*v1.magnitude
ver1.y = polygonCentre.y + math.sin(v1.direction)*v1.magnitude
ver2.x = polygonCentre.x + math.cos(v2.direction)*v2.magnitude
ver2.y = polygonCentre.y + math.sin(v2.direction)*v2.magnitude
poly = canvas.create_line(ver1.x,ver1.y,ver2.x,ver2.y,fill = color);
ver1.x = polygonCentre.x + math.cos(v3.direction)*v3.magnitude
ver1.y = polygonCentre.y + math.sin(v3.direction)*v3.magnitude
poly = canvas.create_line(ver2.x,ver2.y,ver1.x,ver1.y,fill = color);
ver2.x = polygonCentre.x + math.cos(v1.direction)*v1.magnitude
ver2.y = polygonCentre.y + math.sin(v1.direction)*v1.magnitude
poly = canvas.create_line(ver2.x,ver2.y,ver1.x,ver1.y,fill = color);
# mass of triangle
mass = 50# kg
GForce = mass*g
vy = g*(ctime+1)
v = vertex()
v.asign(300,200,0)
velox,veloy,result = vector(),vector(),vector()
velox.asign(0,15.10,"D")
veloy.asign(90,100,"D")# rotation is anticlock wise
tempx = v.x
tempy = v.y
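# Main animation loop (comment added for clarity): each frame draws the triangle at
# its current position, combines the horizontal and vertical velocity components into
# a resultant vector (magnitude plus a quadrant-corrected angle), increases the
# downward speed vy to mimic gravity, and advances the position along the resultant
# direction before clearing the canvas for the next frame.  The resultant angle is
# negated when assigned, apparently to account for the Tkinter canvas y-axis
# pointing downward.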
for i in range(0,100):
drawEquiTriangle("blue",v,10,10)
mag = math.sqrt((velox.magnitude*velox.magnitude)+(veloy.magnitude*veloy.magnitude))
theta = math.atan(veloy.magnitude/velox.magnitude)*(180/PI)
    # convert atan's principal value (degrees) into a full 0-360 environment angle based on the velocity quadrant
if velox.magnitude < 0 and veloy.magnitude >0:
theta = -theta +90
if velox.magnitude < 0 and veloy.magnitude <0:
theta = theta +180
if velox.magnitude > 0 and veloy.magnitude <0:
theta = 360+theta
result.asign(-theta,mag,"D")
root.update()
vy = vy+ g*ctime
veloy.asign(90,-vy,"D")
print (result.magnitude," | ",result.getDirection())
v.x += math.cos(result.direction)*result.magnitude
v.y += math.sin(result.direction)*result.magnitude
canvas.delete(ALL)
time.sleep(0.7)
ctime+=0.1
root.mainloop()
| [
"[email protected]"
] | |
e6c5e5cdf587ad79fbec86f6c8696f8beba6a0fb | c3131577f706b25623da31f1d5af668c717eb91b | /jmt/check_trimout.py | f5f3d31b8210123445a574364265c67a7db66d1c | [] | no_license | cmspixelpilot/POSOverlay | 29e5739cf4f1556802ca02ae8ee1663dcd941b22 | abdd29afe0aeddefc111a14f723ec365329b34a8 | refs/heads/master | 2020-04-04T04:17:25.383958 | 2016-10-14T14:19:15 | 2016-10-14T14:19:15 | 29,026,300 | 1 | 13 | null | 2016-11-24T19:45:07 | 2015-01-09T17:04:16 | C++ | UTF-8 | Python | false | false | 1,789 | py | from JMTTools import *
try:
run = run_from_argv()
except ValueError:
run = None
if run is not None:
calib = calib_dat(run)
calib_rocs = calib.rocs
pixels = calib.pixels
detconfig = detconfig_dat(run)
rocs_qual = detconfig.rocs['qual']
rocs_noqual = detconfig.rocs['noqual']
trim_fn = glob(run_fn(run, 'Trim*dat'))[0]
else:
calib_rocs = ['all']
pixels = [(r,c) for r in xrange(80) for c in range(52)]
rocs_qual = []
rocs_noqual = ['Pilt_BmO_D3_BLD%i_PNL%i_PLQ1_ROC%i' % (bld, pnl, roc) for bld in (10,11) for pnl in (1,2) for roc in range(16) if (bld,pnl) != (11,1)]
rocs_noqual += ['Pilt_BmI_D3_BLD%i_PNL%i_PLQ1_ROC%i' % (bld, pnl, roc) for bld in (2,3) for pnl in (1,2) for roc in range(16)]
trim_fn = sys.argv[1]
print 'run:', run
trims = [TrimResult(x.strip()) for x in open(trim_fn).read().split('\n') if x.strip()]
trims_by_roc = defaultdict(list)
trims_by_roc_px = defaultdict(list)
for t in trims:
trims_by_roc[t.roc].append(t)
trims_by_roc_px[(t.roc, t.row, t.col)].append(t)
assert all(len(v) == 1 for v in trims_by_roc_px.itervalues())
assert calib_rocs == ['all']
for roc, quals in rocs_qual:
assert quals == ('noAnalogSignal',)
print '# trims:', len(trims)
print '# rocs:', len(rocs_noqual)
print '# pix:', len(pixels)
should = len(rocs_noqual) * len(pixels)
print '-> should have %i trims, missing %i' % (should, should-len(trims))
no_trim = defaultdict(list)
for roc in rocs_noqual:
for r,c in pixels:
if not trims_by_roc_px.has_key((roc, r, c)):
no_trim[roc].append((r,c))
if no_trim:
print 'no trim for:'
for roc in sorted(no_trim.iterkeys()):
print roc.ljust(40), '# pix:', len(no_trim[roc])
print 'sum:', sum(len(v) for v in no_trim.itervalues())
| [
"[email protected]"
] | |
d0d63a9a8cd95eb9efff60c2a9ff226382464f4d | 6e0d936d3b308ea0a2b179abf58d1e9520022174 | /fiskalnavenv/bin/iptest3 | 2ecac9b10d4626665dafc2b64f9a3be508cb70f4 | [] | no_license | robizd/fiskali | e9273697fab7593cabfe7c898ec8b16b3f62ecf1 | 4e27b08a08de44fcf6a5459866b1b59b2c31b89c | refs/heads/master | 2022-12-16T21:50:06.538824 | 2020-09-23T10:51:23 | 2020-09-23T10:51:23 | 292,207,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/home/r/Desktop/fiskalna/fiskalnavenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a04c4b33ed76bd6588fc389d6e747735858613ee | 1236a8208138a6f8d15a0fc576fdec98e01f7d8f | /mypy/10.模块/10.3.包的概念.py | 3b1f49939b5a14ff2e4e64a009898574f25f5d7a | [] | no_license | dingzishidtc/dzPython | cfb26ca7c38b1ed44a0e42ba5d1541aa8169ae19 | 3adf8aba5fe12ab28bd6d02e4e6baa60b828985d | refs/heads/master | 2023-01-30T00:35:12.915290 | 2020-12-09T13:04:48 | 2020-12-09T13:04:48 | 260,984,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # Package vs module:
# a simple way to think about it: a module is a file and a package is a folder
# a package contains modules, and a package can also contain other packages
#
# Some groundwork is needed first: the folder must contain an __init__.py before Python treats it as a package (in practice it may work without one, probably depending on the Python version, but adding it is still recommended)
# the code in __init__.py becomes the content of the package; for example, you can define variables, functions, etc. in it
#
# For example, given the directory structure below, with the environment variable set to D:\environment\python\modules
#
# D:\environment\python\modules
# └─fakeGame
#     ├─ items
#     |    └─ sword.py
#     ├─ hero.py
#     └─ monster.py
from fakeGame import hero, monster
import fakeGame.items.sword
h = fakeGame.hero.Hero()
m = fakeGame.monster.Monster()
s = fakeGame.items.sword.Sword()
print("=================================================")
# In practice you will find that, to use any module under a package, you have to import that module explicitly; when a package contains a large number of modules this quickly becomes impractical
# Since importing a package runs its __init__.py first, __init__.py can be used to make the package import its own contents
#
# In fakeGame2/__init__.py add
# import fakeGame2.items
# import fakeGame2.hero
# import fakeGame2.monster
#
# In fakeGame2/items/__init__.py add
# import fakeGame2.items.sword
#
# Then, when fakeGame2 is imported, fakeGame2/__init__.py runs first; the "import fakeGame2.items" inside it runs fakeGame2/items/__init__.py
# and so on down the chain, so in the end importing the fakeGame2 package loads every module underneath it
#
# D:\environment\python\modules
# └─fakeGame2
#     ├─ __init__.py
#     ├─ items
#     |    ├─ __init__.py
#     |    └─ sword.py
#     ├─ hero.py
#     └─ monster.py
#
import fakeGame2
h = fakeGame2.hero.Hero()
m = fakeGame2.monster.Monster()
s = fakeGame2.items.sword.Sword()
# This takes care of most of the problem. There is of course much more to know about packages; it is not covered in detail here and will come up again when needed | [
"[email protected]"
] | |
60cdc12212c922a0f11ab3f0c6b2a4088eef2a80 | f0e2a0190c0b9e4591ec25c525882cfeec6c1b87 | /mahjong/yaku_checker/kokushi.py | c647e6e9d9a8ea963931f9715c2cc43f033f7ead | [] | no_license | thgwon-yaho-qcw/Mahjong | 33d893c30a284e8b59664638bc6061a6e92845aa | 25ef2684507225260e020eba1f841ec656204a3f | refs/heads/master | 2023-08-29T00:28:42.136910 | 2021-10-19T09:04:02 | 2021-10-19T09:04:02 | 347,359,493 | 1 | 0 | null | 2021-10-19T09:04:03 | 2021-03-13T12:04:32 | Python | UTF-8 | Python | false | false | 202 | py | from mahjong.divider import Division
from mahjong.yaku_checker.yaku import Yaku
class Kokushi(Yaku):
def is_satisfied(self, division: Division, hand_info):
return len(division.parts) == 1
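# Note (added): kokushi musou ("thirteen orphans") is detected here purely from the
# shape of the division; the divider presumably returns a single undivided 14-tile
# part only for a kokushi hand, so exactly one part means the yaku is satisfied.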
| [
"[email protected]"
] | |
2db24c5c9eb872707f7249df40008aa9ac62b2b6 | e5d130e183b5dea1b7aad23a047c703fa0d2b3bf | /lightbus_vendored/aioredis/abc.py | 5c1bed890fcba6bae0602711e53bf29b7d888982 | [
"Apache-2.0"
] | permissive | adamcharnock/lightbus | 4a86428b8203bfe98f77a32375ac961ef398ce16 | cf892779a9a9a8f69c789ffa83c24acfb7f9a336 | refs/heads/master | 2023-08-26T04:19:39.395735 | 2023-08-23T11:07:44 | 2023-08-23T11:07:44 | 94,617,214 | 193 | 22 | Apache-2.0 | 2023-08-10T21:21:51 | 2017-06-17T10:39:23 | Python | UTF-8 | Python | false | false | 3,746 | py | """The module provides connection and connections pool interfaces.
These are intended to be used for implementing custom connection managers.
"""
import abc
__all__ = [
'AbcConnection',
'AbcPool',
'AbcChannel',
]
class AbcConnection(abc.ABC):
"""Abstract connection interface."""
@abc.abstractmethod
def execute(self, command, *args, **kwargs):
"""Execute redis command."""
@abc.abstractmethod
def execute_pubsub(self, command, *args, **kwargs):
"""Execute Redis (p)subscribe/(p)unsubscribe commands."""
@abc.abstractmethod
def close(self):
"""Perform connection(s) close and resources cleanup."""
@abc.abstractmethod
async def wait_closed(self):
"""
Coroutine waiting until all resources are closed/released/cleaned up.
"""
@property
@abc.abstractmethod
def closed(self):
"""Flag indicating if connection is closing or already closed."""
@property
@abc.abstractmethod
def db(self):
"""Current selected DB index."""
@property
@abc.abstractmethod
def encoding(self):
"""Current set connection codec."""
@property
@abc.abstractmethod
def in_pubsub(self):
"""Returns number of subscribed channels.
Can be tested as bool indicating Pub/Sub mode state.
"""
@property
@abc.abstractmethod
def pubsub_channels(self):
"""Read-only channels dict."""
@property
@abc.abstractmethod
def pubsub_patterns(self):
"""Read-only patterns dict."""
@property
@abc.abstractmethod
def address(self):
"""Connection address."""
class AbcPool(AbcConnection):
"""Abstract connections pool interface.
Inherited from AbcConnection so both have common interface
for executing Redis commands.
"""
@abc.abstractmethod
def get_connection(self, command, args=()):
"""
Gets free connection from pool in a sync way.
If no connection available — returns None.
"""
@abc.abstractmethod
async def acquire(self, command=None, args=()):
"""Acquires connection from pool."""
@abc.abstractmethod
def release(self, conn):
"""Releases connection to pool.
:param AbcConnection conn: Owned connection to be released.
"""
@property
@abc.abstractmethod
def address(self):
"""Connection address or None."""
class AbcChannel(abc.ABC):
"""Abstract Pub/Sub Channel interface."""
@property
@abc.abstractmethod
def name(self):
"""Encoded channel name or pattern."""
@property
@abc.abstractmethod
def is_pattern(self):
"""Boolean flag indicating if channel is pattern channel."""
@property
@abc.abstractmethod
def is_active(self):
"""Flag indicating that channel has unreceived messages
and not marked as closed."""
@abc.abstractmethod
async def get(self):
"""Wait and return new message.
Will raise ``ChannelClosedError`` if channel is not active.
"""
# wait_message is not required; details of implementation
# @abc.abstractmethod
# def wait_message(self):
# pass
@abc.abstractmethod
def put_nowait(self, data):
"""Send data to channel.
Called by RedisConnection when new message received.
For pattern subscriptions data will be a tuple of
channel name and message itself.
"""
@abc.abstractmethod
def close(self, exc=None):
"""Marks Channel as closed, no more messages will be sent to it.
Called by RedisConnection when channel is unsubscribed
or connection is closed.
"""
| [
"[email protected]"
] | |
4c2d33c12259b37ebf02f8da514795c25f84d6b8 | 36ace720c8507b8724957f6c85a5962d15a2f99c | /tests/unit/broker/test_simulated_broker.py | 5168d71e8f8346c5f1026d4661877b98204b2e2b | [
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | simongarisch/qstrader | 8ce950036f6404dd4d6c61016cb6c77805942cd5 | 557f2fc2598b631a005309e415a5dab12793fd14 | refs/heads/master | 2021-06-27T04:51:24.588474 | 2020-08-29T05:58:08 | 2020-08-29T05:58:08 | 102,059,555 | 0 | 0 | MIT | 2020-08-29T04:36:52 | 2017-09-01T01:11:25 | Python | UTF-8 | Python | false | false | 21,014 | py | import queue
import numpy as np
import pandas as pd
import pytest
import pytz
from qstrader.broker.portfolio.portfolio import Portfolio
from qstrader.broker.simulated_broker import SimulatedBroker
from qstrader.broker.fee_model.zero_fee_model import ZeroFeeModel
from qstrader import settings
class ExchangeMock(object):
def get_latest_asset_bid_ask(self, asset):
return (np.NaN, np.NaN)
def is_open_at_datetime(self, dt):
return True
class ExchangeMockException(object):
def get_latest_asset_bid_ask(self, asset):
raise ValueError("No price available!")
def is_open_at_datetime(self, dt):
return True
class ExchangeMockPrice(object):
def is_open_at_datetime(self, dt):
return True
class DataHandlerMock(object):
def get_asset_latest_bid_ask_price(self, dt, asset):
return (np.NaN, np.NaN)
def get_asset_latest_mid_price(self, dt, asset):
return np.NaN
class DataHandlerMockPrice(object):
def get_asset_latest_bid_ask_price(self, dt, asset):
return (53.45, 53.47)
def get_asset_latest_mid_price(self, dt, asset):
        return (53.47 + 53.45) / 2.0
class OrderMock(object):
def __init__(self, asset, quantity, order_id=None):
self.asset = asset
self.quantity = quantity
self.order_id = 1 if order_id is None else order_id
self.direction = np.copysign(1, self.quantity)
class AssetMock(object):
def __init__(self, name, symbol):
self.name = name
self.symbol = symbol
def test_initial_settings_for_default_simulated_broker():
"""
Tests that the SimulatedBroker settings are set
correctly for default settings.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
# Test a default SimulatedBroker
sb1 = SimulatedBroker(start_dt, exchange, data_handler)
assert sb1.start_dt == start_dt
assert sb1.current_dt == start_dt
assert sb1.exchange == exchange
assert sb1.account_id is None
assert sb1.base_currency == "USD"
assert sb1.initial_funds == 0.0
assert type(sb1.fee_model) == ZeroFeeModel
tcb1 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
assert sb1.cash_balances == tcb1
assert sb1.portfolios == {}
assert sb1.open_orders == {}
# Test a SimulatedBroker with some parameters set
sb2 = SimulatedBroker(
start_dt, exchange, data_handler, account_id="ACCT1234",
base_currency="GBP", initial_funds=1e6,
fee_model=ZeroFeeModel()
)
assert sb2.start_dt == start_dt
assert sb2.current_dt == start_dt
assert sb2.exchange == exchange
assert sb2.account_id == "ACCT1234"
assert sb2.base_currency == "GBP"
assert sb2.initial_funds == 1e6
assert type(sb2.fee_model) == ZeroFeeModel
tcb2 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
tcb2["GBP"] = 1e6
assert sb2.cash_balances == tcb2
assert sb2.portfolios == {}
assert sb2.open_orders == {}
def test_bad_set_base_currency():
"""
Checks _set_base_currency raises ValueError
if a non-supported currency is attempted to be
set as the base currency.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
with pytest.raises(ValueError):
SimulatedBroker(
start_dt, exchange, data_handler, base_currency="XYZ"
)
def test_good_set_base_currency():
"""
Checks _set_base_currency sets the currency
correctly if it is supported by QSTrader.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(
start_dt, exchange, data_handler, base_currency="EUR"
)
assert sb.base_currency == "EUR"
def test_bad_set_initial_funds():
"""
Checks _set_initial_funds raises ValueError
if initial funds amount is negative.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
with pytest.raises(ValueError):
SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=-56.34
)
def test_good_set_initial_funds():
"""
Checks _set_initial_funds sets the initial funds
correctly if it is a positive floating point value.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler, initial_funds=1e4)
assert sb._set_initial_funds(1e4) == 1e4
def test_all_cases_of_set_broker_commission():
"""
Tests that _set_broker_commission correctly sets the
appropriate broker commission model depending upon
user choice.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
# Broker commission is None
sb1 = SimulatedBroker(start_dt, exchange, data_handler)
assert sb1.fee_model.__class__.__name__ == "ZeroFeeModel"
# Broker commission is specified as a subclass
# of FeeModel abstract base class
bc2 = ZeroFeeModel()
sb2 = SimulatedBroker(
start_dt, exchange, data_handler, fee_model=bc2
)
assert sb2.fee_model.__class__.__name__ == "ZeroFeeModel"
# FeeModel is mis-specified and thus
# raises a TypeError
with pytest.raises(TypeError):
SimulatedBroker(
start_dt, exchange, data_handler, fee_model="bad_fee_model"
)
def test_set_cash_balances():
"""
Checks _set_cash_balances for zero and non-zero
initial_funds.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
# Zero initial funds
sb1 = SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=0.0
)
tcb1 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
assert sb1._set_cash_balances() == tcb1
# Non-zero initial funds
sb2 = SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=12345.0
)
tcb2 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
tcb2["USD"] = 12345.0
assert sb2._set_cash_balances() == tcb2
def test_set_initial_portfolios():
"""
Check _set_initial_portfolios method for return
of an empty dictionary.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
assert sb._set_initial_portfolios() == {}
def test_set_initial_open_orders():
"""
Check _set_initial_open_orders method for return
of an empty dictionary.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
assert sb._set_initial_open_orders() == {}
def test_subscribe_funds_to_account():
"""
Tests subscribe_funds_to_account method for:
* Raising ValueError with negative amount
* Correctly setting cash_balances for a positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.subscribe_funds_to_account(-4306.23)
# Correctly setting cash_balances for a positive amount
sb.subscribe_funds_to_account(165303.23)
assert sb.cash_balances[sb.base_currency] == 165303.23
def test_withdraw_funds_from_account():
"""
Tests withdraw_funds_from_account method for:
* Raising ValueError with negative amount
* Raising ValueError for lack of cash
* Correctly setting cash_balances for positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler, initial_funds=1e6)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.withdraw_funds_from_account(-4306.23)
# Raising ValueError for lack of cash
with pytest.raises(ValueError):
sb.withdraw_funds_from_account(2e6)
# Correctly setting cash_balances for a positive amount
sb.withdraw_funds_from_account(3e5)
assert sb.cash_balances[sb.base_currency] == 7e5
def test_get_account_cash_balance():
"""
Tests get_account_cash_balance method for:
* If currency is None, return the cash_balances
* If the currency code isn't in the cash_balances
dictionary, then raise ValueError
* Otherwise, return the appropriate cash balance
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=1000.0
)
# If currency is None, return the cash balances
sbcb1 = sb.get_account_cash_balance()
tcb1 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
tcb1["USD"] = 1000.0
assert sbcb1 == tcb1
# If the currency code isn't in the cash_balances
# dictionary, then raise ValueError
with pytest.raises(ValueError):
sb.get_account_cash_balance(currency="XYZ")
# Otherwise, return appropriate cash balance
assert sb.get_account_cash_balance(currency="USD") == 1000.0
assert sb.get_account_cash_balance(currency="EUR") == 0.0
def test_get_account_total_market_value():
"""
Tests get_account_total_market_value method for:
* The correct market values after cash is subscribed.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Subscribe all necessary funds and create portfolios
sb.subscribe_funds_to_account(300000.0)
sb.create_portfolio(portfolio_id="1", name="My Portfolio #1")
sb.create_portfolio(portfolio_id="2", name="My Portfolio #1")
sb.create_portfolio(portfolio_id="3", name="My Portfolio #1")
sb.subscribe_funds_to_portfolio("1", 100000.0)
sb.subscribe_funds_to_portfolio("2", 100000.0)
sb.subscribe_funds_to_portfolio("3", 100000.0)
# Check that the market value is correct
res_equity = sb.get_account_total_equity()
test_equity = {
"1": 100000.0,
"2": 100000.0,
"3": 100000.0,
"master": 300000.0
}
assert res_equity == test_equity
def test_create_portfolio():
"""
Tests create_portfolio method for:
* If portfolio_id already in the dictionary keys,
raise ValueError
* If it isn't, check that they portfolio and open
orders dictionary was created correctly.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# If portfolio_id isn't in the dictionary, then check it
# was created correctly, along with the orders dictionary
sb.create_portfolio(portfolio_id=1234, name="My Portfolio")
assert "1234" in sb.portfolios
assert isinstance(sb.portfolios["1234"], Portfolio)
assert "1234" in sb.open_orders
assert isinstance(sb.open_orders["1234"], queue.Queue)
# If portfolio is already in the dictionary
# then raise ValueError
with pytest.raises(ValueError):
sb.create_portfolio(
portfolio_id=1234, name="My Portfolio"
)
def test_list_all_portfolio():
"""
Tests list_all_portfolios method for:
* If empty portfolio dictionary, return empty list
* If non-empty, return sorted list via the portfolio IDs
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# If empty portfolio dictionary, return empty list
assert sb.list_all_portfolios() == []
# If non-empty, return sorted list via the portfolio IDs
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.create_portfolio(portfolio_id="z154", name="My Portfolio #2")
sb.create_portfolio(portfolio_id="abcd", name="My Portfolio #3")
res_ports = sorted([
p.portfolio_id
for p in sb.list_all_portfolios()
])
test_ports = ["1234", "abcd", "z154"]
assert res_ports == test_ports
def test_subscribe_funds_to_portfolio():
"""
Tests subscribe_funds_to_portfolio method for:
* Raising ValueError with negative amount
    * Raising KeyError if the portfolio does not exist
* Correctly setting cash_balances for a positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.subscribe_funds_to_portfolio("1234", -4306.23)
# Raising KeyError if portfolio doesn't exist
with pytest.raises(KeyError):
sb.subscribe_funds_to_portfolio("1234", 5432.12)
# Add in cash balance to the account
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(165303.23)
# Raising ValueError if not enough cash
with pytest.raises(ValueError):
sb.subscribe_funds_to_portfolio("1234", 200000.00)
# If everything else worked, check balances are correct
sb.subscribe_funds_to_portfolio("1234", 100000.00)
assert sb.cash_balances[sb.base_currency] == 65303.23000000001
assert sb.portfolios["1234"].cash == 100000.00
def test_withdraw_funds_from_portfolio():
"""
Tests withdraw_funds_from_portfolio method for:
* Raising ValueError with negative amount
    * Raising KeyError if the portfolio does not exist
* Raising ValueError for a lack of cash
* Correctly setting cash_balances for a positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.withdraw_funds_from_portfolio("1234", -4306.23)
# Raising KeyError if portfolio doesn't exist
with pytest.raises(KeyError):
sb.withdraw_funds_from_portfolio("1234", 5432.12)
# Add in cash balance to the account
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(165303.23)
sb.subscribe_funds_to_portfolio("1234", 100000.00)
# Raising ValueError if not enough cash
with pytest.raises(ValueError):
sb.withdraw_funds_from_portfolio("1234", 200000.00)
# If everything else worked, check balances are correct
sb.withdraw_funds_from_portfolio("1234", 50000.00)
assert sb.cash_balances[sb.base_currency] == 115303.23000000001
assert sb.portfolios["1234"].cash == 50000.00
def test_get_portfolio_cash_balance():
"""
Tests get_portfolio_cash_balance method for:
* Raising ValueError if portfolio_id not in keys
* Correctly obtaining the value after cash transfers
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError if portfolio_id not in keys
with pytest.raises(ValueError):
sb.get_portfolio_cash_balance("5678")
# Create fund transfers and portfolio
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(175000.0)
sb.subscribe_funds_to_portfolio("1234", 100000.00)
# Check correct values obtained after cash transfers
assert sb.get_portfolio_cash_balance("1234") == 100000.0
def test_get_portfolio_total_market_value():
"""
Tests get_portfolio_total_market_value method for:
    * Raising KeyError if portfolio_id not in keys
* Correctly obtaining the market value after cash transfers
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising KeyError if portfolio_id not in keys
with pytest.raises(KeyError):
sb.get_portfolio_total_market_value("5678")
# Create fund transfers and portfolio
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(175000.0)
sb.subscribe_funds_to_portfolio("1234", 100000.00)
# Check correct values obtained after cash transfers
assert sb.get_portfolio_total_equity("1234") == 100000.0
def test_submit_order():
"""
    Tests the submit_order and _execute_order methods for:
    * Raises KeyError if the portfolio_id does not exist
* Raises ValueError if bid/ask is (np.NaN, np.NaN)
* Checks that bid/ask are correctly set dependent
upon order direction
* Checks that portfolio values are correct after
carrying out a transaction
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
# Raising KeyError if portfolio_id not in keys
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
asset = 'EQ:RDSB'
quantity = 100
order = OrderMock(asset, quantity)
with pytest.raises(KeyError):
sb.submit_order("1234", order)
# Raises ValueError if bid/ask is (np.NaN, np.NaN)
exchange_exception = ExchangeMockException()
sbnp = SimulatedBroker(start_dt, exchange_exception, data_handler)
sbnp.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
quantity = 100
order = OrderMock(asset, quantity)
with pytest.raises(ValueError):
sbnp._execute_order(start_dt, "1234", order)
# Checks that bid/ask are correctly set dependent on
# order direction
# Positive direction
exchange_price = ExchangeMockPrice()
data_handler_price = DataHandlerMockPrice()
sbwp = SimulatedBroker(start_dt, exchange_price, data_handler_price)
sbwp.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sbwp.subscribe_funds_to_account(175000.0)
sbwp.subscribe_funds_to_portfolio("1234", 100000.00)
quantity = 1000
order = OrderMock(asset, quantity)
sbwp.submit_order("1234", order)
sbwp.update(start_dt)
port = sbwp.portfolios["1234"]
assert port.cash == 46530.0
assert port.total_market_value == 53470.0
assert port.total_equity == 100000.0
assert port.pos_handler.positions[asset].unrealised_pnl == 0.0
assert port.pos_handler.positions[asset].market_value == 53470.0
assert port.pos_handler.positions[asset].net_quantity == 1000
# Negative direction
exchange_price = ExchangeMockPrice()
sbwp = SimulatedBroker(start_dt, exchange_price, data_handler_price)
sbwp.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sbwp.subscribe_funds_to_account(175000.0)
sbwp.subscribe_funds_to_portfolio("1234", 100000.00)
quantity = -1000
order = OrderMock(asset, quantity)
sbwp.submit_order("1234", order)
sbwp.update(start_dt)
port = sbwp.portfolios["1234"]
assert port.cash == 153450.0
assert port.total_market_value == -53450.0
assert port.total_equity == 100000.0
assert port.pos_handler.positions[asset].unrealised_pnl == 0.0
assert port.pos_handler.positions[asset].market_value == -53450.0
assert port.pos_handler.positions[asset].net_quantity == -1000
def test_update_sets_correct_time():
"""
Tests that the update method sets the current
time correctly.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
new_dt = pd.Timestamp('2017-10-07 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
sb.update(new_dt)
assert sb.current_dt == new_dt
| [
"[email protected]"
] | |
625de4297ccafd3c704bbe927b431be4cfafbc38 | 6066eba4314ee5c8c26894e8864103e27bc547ba | /test/algorithms/ground_state_solvers/minimum_eigensolver_factories/test_vqe_ucc_factory.py | 7da78201563cde2ea1075c5052f0467f010e1f09 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Global19/qiskit-nature | 8098857e014fbc984028d1b8b1cb0a5055ecc407 | 5e932f6055e68649b4d6b0140dc926b07ba316e4 | refs/heads/master | 2023-04-15T01:42:49.663502 | 2021-04-09T15:56:08 | 2021-04-09T15:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,922 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test VQE UCC MinimumEigensovler Factory """
import unittest
from test import QiskitNatureTestCase
from qiskit import BasicAer
from qiskit.utils import QuantumInstance
from qiskit.opflow import AerPauliExpectation
from qiskit.algorithms.optimizers import COBYLA
from qiskit_nature.circuit.library import HartreeFock, UCCSD
from qiskit_nature.operators.second_quantization.qubit_converter import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import \
VQEUCCFactory
class TestVQEUCCFactory(QiskitNatureTestCase):
""" Test VQE UCC MinimumEigensovler Factory """
# NOTE: The actual usage of this class is mostly tested in combination with the ground-state
# eigensolvers (one module above).
def setUp(self):
super().setUp()
self.converter = QubitConverter(JordanWignerMapper())
self.seed = 50
self.quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'),
shots=1,
seed_simulator=self.seed,
seed_transpiler=self.seed)
self._vqe_ucc_factory = VQEUCCFactory(self.quantum_instance)
def test_setters_getters(self):
""" Test Getter/Setter """
with self.subTest("Quantum Instance"):
self.assertEqual(self._vqe_ucc_factory.quantum_instance, self.quantum_instance)
self._vqe_ucc_factory.quantum_instance = None
self.assertEqual(self._vqe_ucc_factory.quantum_instance, None)
with self.subTest("Optimizer"):
self.assertEqual(self._vqe_ucc_factory.optimizer, None)
optimizer = COBYLA()
self._vqe_ucc_factory.optimizer = optimizer
self.assertEqual(self._vqe_ucc_factory.optimizer, optimizer)
with self.subTest("Initial Point"):
self.assertEqual(self._vqe_ucc_factory.initial_point, None)
initial_point = [1, 2, 3]
self._vqe_ucc_factory.initial_point = initial_point
self.assertEqual(self._vqe_ucc_factory.initial_point, initial_point)
with self.subTest("Expectation"):
self.assertEqual(self._vqe_ucc_factory.expectation, None)
expectation = AerPauliExpectation()
self._vqe_ucc_factory.expectation = expectation
self.assertEqual(self._vqe_ucc_factory.expectation, expectation)
with self.subTest("Include Custom"):
self.assertEqual(self._vqe_ucc_factory.include_custom, False)
self._vqe_ucc_factory.include_custom = True
self.assertEqual(self._vqe_ucc_factory.include_custom, True)
with self.subTest("Ansatz"):
self.assertEqual(self._vqe_ucc_factory.ansatz, None)
ansatz = UCCSD()
self._vqe_ucc_factory.ansatz = ansatz
self.assertTrue(isinstance(self._vqe_ucc_factory.ansatz, UCCSD))
with self.subTest("Initial State"):
self.assertEqual(self._vqe_ucc_factory.initial_state, None)
initial_state = HartreeFock(4, (1, 1), self.converter)
self._vqe_ucc_factory.initial_state = initial_state
self.assertEqual(self._vqe_ucc_factory.initial_state, initial_state)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
19881f72f2314c238faa43508926f8455e67eeb1 | 0d1f88e0c9e463c5e42b3c78c708df8b03f99a2b | /word/models.py | 3b1f49939b5a14ff2e4e64a009898574f25f5d7a | [] | no_license | sefaky/Pre4Define | 67784cf1ca61df81683dcad3e32cd9b15eb7d7d3 | ad45d8b6cda778cb554ff2ee46df09b3276fb1ba | refs/heads/main | 2023-02-18T17:03:36.785752 | 2021-01-20T10:17:45 | 2021-01-20T10:17:45 | 330,632,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from django.db import models
class WordsDefining(models.Model):
word = models.CharField(max_length=30)
category = models.CharField(max_length=30)
pegi = models.CharField(max_length=3)
commentCount = models.CharField(max_length=11)
downloads = models.CharField(max_length=12)
ranking = models.CharField(max_length=4)
def __str__(self):
        return self.word + " " + self.category | [
"[email protected]"
] | |
7bc5e16bd591b8abde3f6bbdc886d227d1cb9588 | 85ed63e6ae798cc26bb7db989912405c2675dbb2 | /MarkowitzSerde.py | 2fbc40b496a5c6a26c09f1e47016064ee0391bbf | [
"MIT"
] | permissive | LaplaceKorea/APIClient | 3608011f81e432237ed8c047b28e6323db3121fa | e772482c3d9cbedee98f46a3529dca5acc254f3c | refs/heads/main | 2023-07-20T06:33:37.141537 | 2021-08-26T15:48:42 | 2021-08-26T15:48:42 | 380,518,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,458 | py | from dataclasses import dataclass
from typing import Any, Dict, List, Union
import numpy as np
from UserTokenSerde import *
import orjson
import json
from dataclasses_serialization.json import JSONSerializer
@dataclass
class Markowitz:
# min_p (w - p)^T S (w-p)
# s.t. sum_i(p_i) = 1
S: np.ndarray
w: np.ndarray
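# --- illustrative sketch (added; not part of the original API) -----------------
# With only the budget constraint sum_i(p_i) = 1, the problem in the comment above,
# min_p (w - p)^T S (w - p), has the closed-form solution
#     p = w + S^{-1} 1 * (1 - 1^T w) / (1^T S^{-1} 1)
# (from the Lagrangian, assuming S is symmetric positive definite).  The helper
# below is a hypothetical local reference implementation useful for sanity-checking
# a returned solution; it is not called anywhere.
def solve_markowitz_locally(m: Markowitz) -> np.ndarray:
    ones = np.ones(len(m.w))
    s_inv_ones = np.linalg.solve(m.S, ones)           # S^{-1} 1
    lam = (1.0 - ones @ m.w) / (ones @ s_inv_ones)    # Lagrange multiplier
    return m.w + lam * s_inv_ones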
@dataclass
class MarkowitzSerde:
S: List[List[float]]
w: List[float]
def makeMarkowitzSerde(m: Markowitz) -> MarkowitzSerde:
s = orjson.dumps(m, option=orjson.OPT_NAIVE_UTC | orjson.OPT_SERIALIZE_NUMPY)
return JSONSerializer.deserialize(MarkowitzSerde, json.loads(s))
def makeMarkowizt(m: MarkowitzSerde) -> Markowitz:
    return Markowitz(np.array(m.S, dtype=float), np.array(m.w, dtype=float))
@dataclass
class MarkowitzSolution:
p: np.ndarray
status: OperationStatusSerde
@dataclass
class MarkowitzSolutionSerde:
p: List[float]
status: OperationStatusSerde
def makeMarkowitzSolutionSerde(m: MarkowitzSolution) -> MarkowitzSolutionSerde:
s = orjson.dumps(m, option=orjson.OPT_NAIVE_UTC | orjson.OPT_SERIALIZE_NUMPY)
#print("mms:", s)
return JSONSerializer.deserialize(MarkowitzSolutionSerde, json.loads(s))
def makeMarkowitzSolution(m: MarkowitzSolutionSerde) -> MarkowitzSolution:
#print("solution => ", ParamSpec)
    return MarkowitzSolution(np.array(m.p, dtype=float), m.status)
@dataclass
class MarkowitzProblem:
m: Markowitz
config: Dict[str, Union[str,float,int]]
info: UserTokenSerde
@dataclass
class MarkowitzProblemSerde:
m: MarkowitzSerde
config: Dict[str, Union[str,float,int]]
info: UserTokenSerde
def makeMarkowitzProblemSerde(m: MarkowitzProblem) -> MarkowitzProblemSerde:
s = orjson.dumps(m, option=orjson.OPT_NAIVE_UTC | orjson.OPT_SERIALIZE_NUMPY)
return JSONSerializer.deserialize(MarkowitzProblemSerde, json.loads(s))
def makeMarkowitzProblem(m: MarkowitzProblemSerde) -> MarkowitzProblem:
return MarkowitzProblem(makeMarkowizt(m.m), m.config, m.info)
def serializeMarkowitzProblemQuery(m: MarkowitzProblem) -> str:
m2 = makeMarkowitzProblemSerde(m)
query = '{ "__class__":"MarkowitzProblem", "query":' + orjson.dumps(m2).decode("utf-8") + '}'
return query
def deserializeMarkowitzSolutionResponse(r: str) -> MarkowitzSolution:
rv = json.loads(r)
p1 = JSONSerializer.deserialize(MarkowitzSolutionSerde, rv)
print("p1=", p1)
s2 = makeMarkowitzSolution(p1)
return s2 | [
"[email protected]"
] | |
672679732bb66c1cc979a3bd42a4d337e9a4a74a | a468016412cc2b435501de4d3ee5c4d2be5fa19f | /coupons/views.py | 0a2a87766e6bc3b9517fd0e671635ecb1af860dd | [] | no_license | sadakchap/first-full-ecom | ef022596c05c29cae0842bae34d201cd4af08b93 | 881b39ec60dff3aef04105e3d08e3be3e16f6420 | refs/heads/master | 2020-08-07T05:02:48.165873 | 2019-10-07T06:16:16 | 2019-10-07T06:16:16 | 213,308,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from django.shortcuts import render,redirect
from django.views.decorators.http import require_POST
from .models import Coupon
from .forms import CouponApplyForm
from django.utils import timezone
# Create your views here.
@require_POST
def coupon_apply(request):
now = timezone.now()
form = CouponApplyForm(request.POST)
if form.is_valid():
code = form.cleaned_data.get('code')
try:
coupon = Coupon.objects.get(code__iexact=code,valid_from__lte=now,valid_to__gte=now,active=True)
request.session['coupon_id'] = coupon.id
except Coupon.DoesNotExist:
request.session['coupon_id'] = None
return redirect('cart:cart_detail')
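# Note (added): CouponApplyForm lives in .forms and is not shown in this file.
# Since only cleaned_data['code'] is read above, it is presumably a plain form
# along the lines of (hypothetical sketch):
#     class CouponApplyForm(forms.Form):
#         code = forms.CharField()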
| [
"[email protected]"
] | |
998c833201d5781dff44b53d3b39f08131db1f84 | edcfa1f24be85d356305c23fb488d1a3ba626ce4 | /django/app/wsgi.py | 7d395aac3dcfafb08868de90288904b12dfc502c | [
"Apache-2.0"
] | permissive | isabella232/pycon-2021-workshop-app-search | 9b0cafafc0ee7fea484f4c47103b66170b6ba93f | aa2a24715ccae222f2dc53a85c9a65ac2dbae6d9 | refs/heads/main | 2023-04-25T06:00:29.362636 | 2021-05-14T20:55:48 | 2021-05-14T20:55:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for pycon_2021 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
application = get_wsgi_application()
| [
"[email protected]"
] |