Dataset schema (column : dtype : observed range). The records below list their values in this column order.

hexsha : stringlengths : 40 to 40
size : int64 : 5 to 2.06M
ext : stringclasses : 10 values
lang : stringclasses : 1 value
max_stars_repo_path : stringlengths : 3 to 248
max_stars_repo_name : stringlengths : 5 to 125
max_stars_repo_head_hexsha : stringlengths : 40 to 78
max_stars_repo_licenses : listlengths : 1 to 10
max_stars_count : int64 : 1 to 191k
max_stars_repo_stars_event_min_datetime : stringlengths : 24 to 24
max_stars_repo_stars_event_max_datetime : stringlengths : 24 to 24
max_issues_repo_path : stringlengths : 3 to 248
max_issues_repo_name : stringlengths : 5 to 125
max_issues_repo_head_hexsha : stringlengths : 40 to 78
max_issues_repo_licenses : listlengths : 1 to 10
max_issues_count : int64 : 1 to 67k
max_issues_repo_issues_event_min_datetime : stringlengths : 24 to 24
max_issues_repo_issues_event_max_datetime : stringlengths : 24 to 24
max_forks_repo_path : stringlengths : 3 to 248
max_forks_repo_name : stringlengths : 5 to 125
max_forks_repo_head_hexsha : stringlengths : 40 to 78
max_forks_repo_licenses : listlengths : 1 to 10
max_forks_count : int64 : 1 to 105k
max_forks_repo_forks_event_min_datetime : stringlengths : 24 to 24
max_forks_repo_forks_event_max_datetime : stringlengths : 24 to 24
content : stringlengths : 5 to 2.06M
avg_line_length : float64 : 1 to 1.02M
max_line_length : int64 : 3 to 1.03M
alphanum_fraction : float64 : 0 to 1
count_classes : int64 : 0 to 1.6M
score_classes : float64 : 0 to 1
count_generators : int64 : 0 to 651k
score_generators : float64 : 0 to 1
count_decorators : int64 : 0 to 990k
score_decorators : float64 : 0 to 1
count_async_functions : int64 : 0 to 235k
score_async_functions : float64 : 0 to 1
count_documentation : int64 : 0 to 1.04M
score_documentation : float64 : 0 to 1
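For orientation, here is a minimal sketch of how rows with this schema could be filtered once exported locally. The filename "python_files.parquet" and the thresholds are assumptions for illustration; only the column names come from the schema above.

# Minimal sketch (assumption: the split has been exported as "python_files.parquet").
import pandas as pd

df = pd.read_parquet("python_files.parquet")

# Keep files that carry some documentation and avoid extremely long lines,
# using only columns documented in the schema above; thresholds are illustrative.
subset = df[
    (df["score_documentation"] > 0.2)
    & (df["max_line_length"] <= 120)
]

print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())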
60ead80e847ae9dc084472d4e5417a3a4311cbff
9,413
py
Python
analisis_de_variables.py
scmarquez/Hause-Price-Kaggle-Competition
5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21
[ "MIT" ]
null
null
null
analisis_de_variables.py
scmarquez/Hause-Price-Kaggle-Competition
5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21
[ "MIT" ]
null
null
null
analisis_de_variables.py
scmarquez/Hause-Price-Kaggle-Competition
5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Dec 29 16:40:53 2017 @author: Sergio """ #Analisis de variables import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn import ensemble, tree, linear_model from sklearn.model_selection import train_test_split, cross_val_score from sklearn.metrics import r2_score, mean_squared_error from sklearn.utils import shuffle import warnings #Ignorar los warnings warnings.filterwarnings('ignore') #Lectura de los datos #En train se guandan los datos con los que se entrenará al modelo train = pd.read_csv('train.csv') #En test se guarda el conjunto de datos para el test test = pd.read_csv('test.csv') #Primero hay que eliminar las varibles que tengan un número alto de valores perdidos #El número de valores perdidos de cada conjunto en cada variable NAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test']) #print(NAs) #Eliminar todas las variables que tengan más de un 0.2 de valores perdidos eliminar = [] nvars = 0 for index, row in NAs.iterrows(): print(index) print(row['Test']) if (row['Test'] > 0.2) or (row ['Train'] > 0.2): eliminar.append(index) #En la variable eliminar estan los nombres de las variables que deben ser directamente eliminadas #Dentro de las variables a eliminar encontramos que la variable de Alley NA no indica desconocido, es un posible valor más de los posibles a tomar #Esa variable debe seguir estando en nuestro conjunto print(eliminar) eliminar.remove('Alley') eliminar.remove('FireplaceQu')#Sucede lo mismo que con Alley train.drop(eliminar,axis=1, inplace=True) test.drop(eliminar,axis=1, inplace=True) """ Ahora es necesario un análisis más profundo de las variables. En primer lugar encontramos algunas variables que parecen tener una representación numérica, como por ejemplo 'MSSubClass' o 'OverallCond'. 
Al leer la documentación sobre que información aportan las variables encontramos que OverallCond aunque sea una variable aparentemente nominal expresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10 """ #Variables numéricas que deben ser transformadas a string test['MSSubClass'] = test['MSSubClass'].astype(str) train['MSSubClass'] = train['MSSubClass'].astype(str) test['YrSold'] = test['YrSold'].astype(str) train['YrSold'] = train['YrSold'].astype(str) #Variables categóricas que deben ser numéricas, ya que expresan puntuación #El lógico pensar que aumentar la puntuación en algo hace efecto directo en el precio final ExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} ExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} BsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} BsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}} HeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} KitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} FireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} GarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} GarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}} PoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}} #Reemplazar los valores en las tablas train.replace(ExterQualvalues,inplace=True) train.replace(ExterCondvalues,inplace=True) train.replace(BsmQualvalues,inplace=True) train.replace(BsmCondvalues,inplace=True) train.replace(HeatingQCvalues,inplace=True) train.replace(KitchenQualvalues,inplace=True) train.replace(FireplaceQuvalues,inplace=True) train.replace(GarageCondvalues,inplace=True) train.replace(GarageQualvalues,inplace=True) train.replace(PoolQCvalues,inplace=True) test.replace(ExterQualvalues,inplace=True) test.replace(ExterCondvalues,inplace=True) test.replace(BsmQualvalues,inplace=True) test.replace(BsmCondvalues,inplace=True) test.replace(HeatingQCvalues,inplace=True) test.replace(KitchenQualvalues,inplace=True) test.replace(FireplaceQuvalues,inplace=True) test.replace(GarageCondvalues,inplace=True) test.replace(GarageQualvalues,inplace=True) test.replace(PoolQCvalues,inplace=True) #Ahora tenemos todas las variables con un tipo de dato 'correcto' #Cuantas variables de cada tipo tenemos train_labels = train.pop('SalePrice') features = pd.concat([train, test], keys=['train', 'test']) enteras = features.dtypes[features.dtypes == 'int64'].index flotantes = features.dtypes[features.dtypes == 'float64'].index nominales = features.dtypes[features.dtypes == 'object'].index #Se pasa a formato lista para su uso ent = [] for var in enteras: ent.append(var) flot = [] for var in flotantes: flot.append(var) nom = [] for var in nominales: nom.append(var) numericas = ent+flot #Ahora es necesario rellenar los valores perdidos de cada variable. 
"""En algunas de las variables que han sido transformadas a numéricas NAN no expresa que el dato no exista, sino que expresa puntuación 0""" features['BsmtQual'] = features['BsmtQual'].fillna(0) features['BsmtCond'] = features['BsmtCond'].fillna(0) features['FireplaceQu'] = features['FireplaceQu'].fillna(0) features['GarageQual'] = features['GarageQual'].fillna(0) features['GarageCond'] = features['GarageCond'].fillna(0) #El resto de variables pueden rellenarse con la media for var in numericas: if features[var].isnull().sum() > 0: features[var] = features[var].fillna(features[var].mean()) #El resto ce variables nomnales se rellenan con el valor más frecuente for var in nominales: if features[var].isnull().sum() > 0: features[var] = features[var].fillna(features[var].mode()[0]) """Una vez que la tabla de datos está en el formato correcto vamos a estudiar la correlación de las variables con el precio. Las variables que presenten una correlación baja se descartarán ya que lo único que van a hacer es hacer que nuestro modelo se impreciso. Si se imputan demasiadas variables perderemos información valiosa y el modelo volverá a ser impreciso. Sacando un Heatmap se puede ver la correlación de las variables""" #train_labels = np.log(train_labels)#La transformación logarítmica de los datos los aproxima a una distribución normal complete = features.loc['train']#Solo se usan las entradas de entrenamiento complete = pd.concat([complete,train_labels],axis=1)#Se adjunta la columna de precios de nuevo correlationPlot = complete.corr()#Mantiene la matriz de correlación en un DataFrame f,ax = plt.subplots(figsize=(12,9))#Configuración del tamaño de la imagen sns.heatmap(correlationPlot,vmax=.8,square=True)#Crea el heatmap con los valores de correlación plt.yticks(rotation=0)#cambia el eje de las etiquetas del gráfico para que se vean bien plt.xticks(rotation=90)#cambia el eje de las etiquetas del gráfico para que se vean bien plt.show()#Muestra el gráfico f.savefig('Heatmap.png')#Guarda el gráfico en un archivo """La matriz de correlación muestra la correlación entre dos variables de forma que los valores más claros muestran que dos variables tienen una correlación alta El siguiente paso del análisis es buscar que variables muestran una correlación alta entre sí y eliminar una de esas variables, ya que es información redundante y puede eliminarse. Otra manera de enfocar el problema es que usar dos variables correlacionadas puede ayudar a sofocar el efecto del ruido en una variable. En primer lugar es necesario descubrir que variables son las que determinan el precio de la vivienda usando la correlación. """ #Crear la lista de variables con correlación alta con el precio de la vivienda """Inciso: calcular la correlación antes de aplicar la escala logaritmica a los datos tiene sentido, pues el coeficiente de correlación de Pearson no varía con la escala y el origen. Además solo nos sirve para hacer una aproximación hacia que variables usar o no en el algoritmo. Después si será necesario hacer que las variables tengan una distribución normalizada. 
""" HighCorrelation = [] for index, row in correlationPlot.iterrows(): if (row['SalePrice'] >= 0.5) or (row ['SalePrice'] <= -0.5): HighCorrelation.append(index) print(row['SalePrice']) print("total de variables: "+str(len(HighCorrelation))) print(HighCorrelation) """Ahora hay que examniar las variables nominales que se tendrán en cuenta Para hacer este análisis se va a usar una gráfica que exprese la relación entre el precio y el valor de la vivienda.""" complete = features.loc['train'] complete = pd.concat([complete,train_labels],axis=1) malas = [#'MSSubClass', 'LandContour', 'LandSlope', #'RoofStyle', #'RoofMatl', 'Exterior2nd', #'Exterior1st', 'MasVnrType', 'BsmtExposure', 'Functional', 'YrSold'] ################################## #malas = ['Utilities', 'RoofMatl','Heating','Functional'] for var in malas: data = pd.concat([complete[var],complete['SalePrice']],axis=1) f,ax = plt.subplots(figsize=(12,9)) fig = sns.boxplot(x=var,y="SalePrice",data=data) fig.axis(ymin=0,ymax=800000) plt.xticks(rotation=90) f.savefig(str(var)+'_Price.png') """ aparentemente malas variables: LandContour LandScope RoofStyle RoofMatl Exterior2nd Exterior1st MasVnrType BsmtExposure Functional YrSold """ """Analisis con PCA"""
41.46696
147
0.739403
0
0
0
0
0
0
0
0
5,509
0.581425
60eb25016e8dffa48a7ee4e49cffca99635d22f2
566
py
Python
query-gen.py
mdatsev/prostgres
3418258a8b832546ef4d5009867bf1cf79248b7b
[ "Unlicense" ]
null
null
null
query-gen.py
mdatsev/prostgres
3418258a8b832546ef4d5009867bf1cf79248b7b
[ "Unlicense" ]
null
null
null
query-gen.py
mdatsev/prostgres
3418258a8b832546ef4d5009867bf1cf79248b7b
[ "Unlicense" ]
null
null
null
import random
import sys

ntables = 100
ncols = 100
nrows = 10000

def printstderr(s):
    sys.stderr.write(s + '\n')
    sys.stderr.flush()

def get_value():
    return random.randint(-99999999, 99999999)

for t in range(ntables):
    printstderr(f'{t}/{ntables}')
    print(f"create table x ({','.join(['x int'] * ncols)});")
    for r in range(nrows):
        print(f"insert into _last ({','.join(['x'] * ncols)}) values (", end='')
        for c in range(ncols):
            print(get_value(), end=('' if c == ncols - 1 else ','))
        print(');')

# 10 min to generate
# 3 min to process
21.769231
76
0.609541
0
0
0
0
0
0
0
0
176
0.310954
60ebcdffbce13db306c3a548fbb31af96bfe8e29
1,549
py
Python
molecule/default/tests/test_default.py
joshbenner/sensu-ansible-role
ecc92ba3462d7edf50ad96ddda61080ba58c29f8
[ "BSD-3-Clause" ]
null
null
null
molecule/default/tests/test_default.py
joshbenner/sensu-ansible-role
ecc92ba3462d7edf50ad96ddda61080ba58c29f8
[ "BSD-3-Clause" ]
1
2018-10-31T03:14:05.000Z
2018-10-31T03:14:05.000Z
molecule/default/tests/test_default.py
joshbenner/sensu-ansible-role
ecc92ba3462d7edf50ad96ddda61080ba58c29f8
[ "BSD-3-Clause" ]
null
null
null
import os

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def test_packages(host):
    package = host.package('sensu')
    assert package.is_installed
    assert '1.7.0' in package.version


def test_dir_ownership(host):
    assert host.file('/opt/sensu').group == 'sensu'


def test_main_config(host):
    f = host.file('/etc/sensu/config.json')
    assert f.exists
    assert f.is_file
    assert f.user == 'sensu'
    assert f.group == 'sensu'
    assert f.mode == 0o600
    assert f.contains('rabbitmq')
    assert f.contains('check-cpu.rb')
    assert f.contains('"foo": "bar"')
    assert f.contains('example_subscription')
    assert f.contains('"zip": "zap"')
    assert not f.contains('subscription_to_be_overridden')


def test_server_running(host):
    server = host.service('sensu-server')
    assert server.is_running
    assert server.is_enabled


def test_api_running(host):
    api = host.service('sensu-api')
    assert api.is_running
    assert api.is_enabled


def test_client_running(host):
    client = host.service('sensu-client')
    assert client.is_running
    assert client.is_enabled


def test_api_listening(host):
    assert host.socket('tcp://0.0.0.0:4567').is_listening


def test_plugin_installed(host):
    assert host.file('/opt/sensu/embedded/bin/check-memory.rb').exists


# Tests extension install/enable
def test_snmp_listening(host):
    assert host.socket('udp://0.0.0.0:1062').is_listening
24.587302
70
0.713363
0
0
0
0
0
0
0
0
358
0.231117
60ec05a1e04f7befa5818096872bdb308d2b1dde
4,552
py
Python
wgskex/worker/netlink.py
moepman/wgskex
7a931088b5910f8034ad5a1362777e08c47c42fe
[ "0BSD" ]
2
2021-01-05T23:42:35.000Z
2021-10-03T14:12:30.000Z
wgskex/worker/netlink.py
moepman/wgskex
7a931088b5910f8034ad5a1362777e08c47c42fe
[ "0BSD" ]
null
null
null
wgskex/worker/netlink.py
moepman/wgskex
7a931088b5910f8034ad5a1362777e08c47c42fe
[ "0BSD" ]
null
null
null
import hashlib import logging import re from dataclasses import dataclass from datetime import datetime, timedelta from textwrap import wrap from typing import Dict, List from pyroute2 import IPRoute, NDB, WireGuard from wgskex.common.utils import mac2eui64 logger = logging.getLogger(__name__) # TODO make loglevel configurable logger.setLevel("DEBUG") @dataclass class WireGuardClient: public_key: str domain: str remove: bool @property def lladdr(self) -> str: m = hashlib.md5() m.update(self.public_key.encode("ascii") + b"\n") hashed_key = m.hexdigest() hash_as_list = wrap(hashed_key, 2) temp_mac = ":".join(["02"] + hash_as_list[:5]) lladdr = re.sub(r"/\d+$", "/128", mac2eui64(mac=temp_mac, prefix="fe80::/10")) return lladdr @property def vx_interface(self) -> str: return f"vx-{self.domain}" @property def wg_interface(self) -> str: return f"wg-{self.domain}" """WireGuardClient describes complete configuration for a specific WireGuard client Attributes: public_key: WireGuard Public key domain: Domain Name of the WireGuard peer lladdr: IPv6 lladdr of the WireGuard peer wg_interface: Name of the WireGuard interface this peer will use vx_interface: Name of the VXLAN interface we set a route for the lladdr to remove: Are we removing this peer or not? """ def wg_flush_stale_peers(domain: str) -> List[Dict]: stale_clients = find_stale_wireguard_clients("wg-" + domain) result = [] for stale_client in stale_clients: stale_wireguard_client = WireGuardClient( public_key=stale_client, domain=domain, remove=True, ) result.append(link_handler(stale_wireguard_client)) return result # pyroute2 stuff def link_handler(client: WireGuardClient) -> Dict[str, Dict]: results = {} results.update({"Wireguard": wireguard_handler(client)}) try: results.update({"Route": route_handler(client)}) except Exception as e: results.update({"Route": {"Exception": e}}) results.update({"Bridge FDB": bridge_fdb_handler(client)}) return results def bridge_fdb_handler(client: WireGuardClient) -> Dict: with IPRoute() as ip: return ip.fdb( "del" if client.remove else "append", # FIXME this list may be empty if the interface is not existing ifindex=ip.link_lookup(ifname=client.vx_interface)[0], lladdr="00:00:00:00:00:00", dst=re.sub(r"/\d+$", "", client.lladdr), nda_ifindex=ip.link_lookup(ifname=client.wg_interface)[0], ) def wireguard_handler(client: WireGuardClient) -> Dict: with WireGuard() as wg: wg_peer = { "public_key": client.public_key, "persistent_keepalive": 15, "allowed_ips": [client.lladdr], "remove": client.remove, } return wg.set(client.wg_interface, peer=wg_peer) def route_handler(client: WireGuardClient) -> Dict: with IPRoute() as ip: return ip.route( "del" if client.remove else "replace", dst=client.lladdr, oif=ip.link_lookup(ifname=client.wg_interface)[0], ) def find_wireguard_domains() -> List[str]: with NDB() as ndb: # ndb.interfaces[{"kind": "wireguard"}]] seems to trigger https://github.com/svinota/pyroute2/issues/737 iface_values = ndb.interfaces.values() interfaces = [iface.get("ifname", "") for iface in iface_values if iface.get("kind", "") == "wireguard"] result = [iface.removeprefix("wg-") for iface in interfaces if iface.startswith("wg-")] return result def find_stale_wireguard_clients(wg_interface: str) -> List[str]: with WireGuard() as wg: all_clients = [] infos = wg.info(wg_interface) for info in infos: clients = info.get_attr("WGDEVICE_A_PEERS") if clients is not None: all_clients.extend(clients) three_minutes_ago = (datetime.now() - timedelta(minutes=3)).timestamp() stale_clients = [ 
client.get_attr("WGPEER_A_PUBLIC_KEY").decode("utf-8") for client in all_clients # TODO add never connected peers to a list and remove them on next call if 0 < (client.get_attr("WGPEER_A_LAST_HANDSHAKE_TIME") or {}).get("tv_sec", int()) < three_minutes_ago ] return stale_clients
30.756757
115
0.639719
1,078
0.236819
0
0
1,089
0.239236
0
0
1,118
0.245606
60ec772a2bff1ce4c7f82d3ad31a4b3889b15287
708
py
Python
api/main.py
Ju99ernaut/super-fastapi
83c232bcaff1006d413a9945ced3ba398b673505
[ "MIT" ]
null
null
null
api/main.py
Ju99ernaut/super-fastapi
83c232bcaff1006d413a9945ced3ba398b673505
[ "MIT" ]
null
null
null
api/main.py
Ju99ernaut/super-fastapi
83c232bcaff1006d413a9945ced3ba398b673505
[ "MIT" ]
null
null
null
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from routes import items

import config
from constants import *

config.parse_args()

app = FastAPI(
    title="API",
    description="API boilerplate",
    version="1.0.0",
    openapi_tags=API_TAGS_METADATA,
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(items.router)


@app.get("/")
async def root():
    return {
        "docs": "api documentation at /docs or /redoc",
    }


if __name__ == "__main__":
    uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
18.153846
82
0.686441
0
0
0
0
106
0.149718
92
0.129944
105
0.148305
60ed370b9d6be678d96e0ec349072e5fb104c1f2
1,582
py
Python
gellifinsta/models.py
vallka/djellifique
fb84fba6be413f9d38276d89ae84aeaff761218f
[ "MIT" ]
null
null
null
gellifinsta/models.py
vallka/djellifique
fb84fba6be413f9d38276d89ae84aeaff761218f
[ "MIT" ]
null
null
null
gellifinsta/models.py
vallka/djellifique
fb84fba6be413f9d38276d89ae84aeaff761218f
[ "MIT" ]
null
null
null
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.html import mark_safe

# Create your models here.


class Gellifinsta(models.Model):
    class Meta:
        ordering = ['-taken_at_datetime']

    shortcode = models.CharField(_("Shortcode"), max_length=20)
    taken_at_datetime = models.DateTimeField(_("taken at"))
    username = models.CharField(_("Username"), max_length=100)
    is_active = models.BooleanField(_("Active"), default=True)
    is_video = models.BooleanField(_("Video"), default=False)
    file_path = models.CharField(_("File Path"), max_length=500)
    url = models.CharField(_("URL"), max_length=500)
    created_dt = models.DateTimeField(_("Created Date/Time"), auto_now_add=True, null=True)
    updated_dt = models.DateTimeField(_("Updated Date/Time"), auto_now=True, null=True)
    caption = models.TextField(_("Caption"), blank=True, null=True)
    tags = models.TextField(_("Tags"), blank=True, null=True)

    def __str__(self):
        return self.shortcode + ':' + str(self.taken_at_datetime)

    def image_tag(self):
        return mark_safe('<img src="%s" width="250" />' % (self.url))

    image_tag.short_description = 'Image'

    def tags_spaced(self):
        return self.tags.replace(',', ' ')

    tags_spaced.short_description = 'Tags'


class Products(models.Model):
    class Meta:
        ordering = ['name']

    name = models.CharField(_("Name"), max_length=100, unique=True)
    is_active = models.BooleanField(_("Active"), default=True)

    def __str__(self):
        return self.name
35.954545
91
0.689633
1,425
0.900759
0
0
0
0
0
0
234
0.147914
60ed6b24088a86522dceda37eeecc0306f7958dc
1,335
py
Python
scanBase/migrations/0003_ipsection.py
wsqy/sacn_server
e91a41a71b27926fbcfbe3f22bbb6bbc61b39461
[ "Apache-2.0" ]
null
null
null
scanBase/migrations/0003_ipsection.py
wsqy/sacn_server
e91a41a71b27926fbcfbe3f22bbb6bbc61b39461
[ "Apache-2.0" ]
null
null
null
scanBase/migrations/0003_ipsection.py
wsqy/sacn_server
e91a41a71b27926fbcfbe3f22bbb6bbc61b39461
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-16 13:35
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('scanBase', '0002_auto_20180116_1321'),
    ]

    operations = [
        migrations.CreateModel(
            name='IPSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip_section', models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='ip段')),
                ('ip_start', models.GenericIPAddressField(blank=True, null=True, verbose_name='开始ip')),
                ('ip_end', models.GenericIPAddressField(blank=True, null=True, verbose_name='结束ip')),
                ('total', models.IntegerField(blank=True, null=True, verbose_name='总量')),
                ('deal_time', models.DateTimeField(blank=True, null=True, verbose_name='处理时间')),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scanBase.CountryInfo', verbose_name='所属国家')),
            ],
            options={
                'verbose_name_plural': 'ip段信息',
                'verbose_name': 'ip段信息',
            },
        ),
    ]
40.454545
140
0.612734
1,188
0.862745
0
0
0
0
0
0
326
0.236747
60edce602ee84c179651834e36a3725524081131
3,522
py
Python
sts/train.py
LostCow/KLUE
73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77
[ "MIT" ]
18
2021-12-22T09:41:24.000Z
2022-03-19T12:54:30.000Z
sts/train.py
LostCow/KLUE
73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77
[ "MIT" ]
null
null
null
sts/train.py
LostCow/KLUE
73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77
[ "MIT" ]
4
2021-12-26T11:31:46.000Z
2022-03-28T07:55:45.000Z
import argparse
import numpy as np
import os
import torch

from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments

from model import RobertaForStsRegression
from dataset import KlueStsWithSentenceMaskDataset
from utils import read_json, seed_everything
from metric import compute_metrics


def main(args):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    config = AutoConfig.from_pretrained(args.model_name_or_path)
    config.num_labels = args.num_labels
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

    train_file_path = os.path.join(args.data_dir, args.train_filename)
    valid_file_path = os.path.join(args.data_dir, args.valid_filename)

    train_json = read_json(train_file_path)
    valid_json = read_json(valid_file_path)

    train_dataset = KlueStsWithSentenceMaskDataset(train_json, tokenizer, 510)
    valid_dataset = KlueStsWithSentenceMaskDataset(train_json, tokenizer, 510)

    model = RobertaForStsRegression.from_pretrained(
        args.model_name_or_path, config=config
    )
    model.to(device)

    training_args = TrainingArguments(
        output_dir=args.model_dir,
        save_total_limit=args.save_total_limit,
        save_steps=args.save_steps,
        num_train_epochs=args.num_train_epochs,
        learning_rate=args.learning_rate,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=64,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=args.weight_decay,
        logging_dir="./logs",
        logging_steps=args.save_steps,
        evaluation_strategy=args.evaluation_strategy,
        metric_for_best_model="pearsonr",
        fp16=True,
        fp16_opt_level="O1",
        eval_steps=args.save_steps,
        load_best_model_at_end=True,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=valid_dataset,
        compute_metrics=compute_metrics,
    )

    trainer.train()
    model.save_pretrained(args.model_dir)
    tokenizer.save_pretrained(args.model_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # data_arg
    parser.add_argument("--data_dir", type=str, default="./data")
    parser.add_argument("--model_dir", type=str, default="./model")
    parser.add_argument("--output_dir", type=str, default="./output")
    parser.add_argument("--model_name_or_path", type=str, default="klue/roberta-large")
    parser.add_argument(
        "--train_filename", type=str, default="klue-sts-v1.1_train.json"
    )
    parser.add_argument("--valid_filename", type=str, default="klue-sts-v1.1_dev.json")

    # train_arg
    parser.add_argument("--num_labels", type=int, default=1)
    parser.add_argument("--seed", type=int, default=15)
    parser.add_argument("--num_train_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--weight_decay", type=float, default=0.01)

    # eval_arg
    parser.add_argument("--evaluation_strategy", type=str, default="steps")
    parser.add_argument("--save_steps", type=int, default=250)
    parser.add_argument("--eval_steps", type=int, default=250)
    parser.add_argument("--save_total_limit", type=int, default=2)

    args = parser.parse_args()
    main(args)
37.073684
87
0.727144
0
0
0
0
0
0
0
0
468
0.132879
60edf40403e6f66a0e02912003729be0d59531bb
1,760
py
Python
test/test_base_client.py
walkr/nanoservice
e2098986b1baa5f283167ae487d14f3c6c21961a
[ "MIT" ]
28
2015-03-26T10:25:31.000Z
2022-01-31T21:59:11.000Z
test/test_base_client.py
walkr/nanoservice
e2098986b1baa5f283167ae487d14f3c6c21961a
[ "MIT" ]
3
2015-09-14T04:10:04.000Z
2020-01-29T03:52:05.000Z
test/test_base_client.py
walkr/nanoservice
e2098986b1baa5f283167ae487d14f3c6c21961a
[ "MIT" ]
9
2015-09-10T08:11:37.000Z
2020-11-08T10:41:51.000Z
import unittest

from nanoservice import Responder
from nanoservice import Requester


class BaseTestCase(unittest.TestCase):

    def setUp(self):
        addr = 'inproc://test'
        self.client = Requester(addr)
        self.service = Responder(addr)
        self.service.register('divide', lambda x, y: x / y)
        self.service.register('echo', lambda x: x)

    def tearDown(self):
        self.client.socket.close()
        self.service.socket.close()


class TestClient(BaseTestCase):

    def test_build_payload(self):
        payload = self.client.build_payload('echo', 'My Name')
        method, args, ref = payload
        self.assertTrue(method == 'echo')
        self.assertTrue(len(payload) == 3)

    def test_encoder(self):
        data = {'name': 'Joe Doe'}
        encoded = self.client.encode(data)
        decoded = self.client.decode(encoded)
        self.assertEqual(data, decoded)

    def test_call_wo_receive(self):
        # Requester side ops
        method, args = 'echo', 'hello world'
        payload = self.client.build_payload(method, args)
        self.client.socket.send(self.client.encode(payload))

        # Responder side ops
        method, args, ref = self.service.receive()
        self.assertEqual(method, 'echo')
        self.assertEqual(args, 'hello world')
        self.assertEqual(ref, payload[2])

    def test_basic_socket_operation(self):
        msg = 'abc'
        self.client.socket.send(msg)
        res = self.service.socket.recv().decode('utf-8')
        self.assertEqual(msg, res)

    def test_timeout(self):
        c = Requester('inproc://timeout', timeouts=(1, 1))
        c.socket.send('hello')
        self.assertRaises(Exception, c.socket.recv)


if __name__ == '__main__':
    unittest.main()
29.830508
62
0.630682
1,621
0.921023
0
0
0
0
0
0
190
0.107955
60f1a087b3bdc065cf389f51df6915d8dc0b8312
563
py
Python
airtech_api/flight/models.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
1
2019-04-04T12:27:55.000Z
2019-04-04T12:27:55.000Z
airtech_api/flight/models.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
34
2019-03-26T11:18:17.000Z
2022-02-10T08:12:36.000Z
airtech_api/flight/models.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
null
null
null
from airtech_api.utils.auditable_model import AuditableBaseModel
from django.db import models

# Create your models here.


class Flight(AuditableBaseModel):
    class Meta:
        db_table = 'Flight'

    capacity = models.IntegerField(null=False)
    location = models.TextField(null=False)
    destination = models.TextField(null=False)
    schedule = models.DateTimeField(null=False)
    current_price = models.IntegerField()
    type = models.CharField(
        choices=(('local', 'local'), ('international', 'international')),
        max_length=13,
    )
28.15
73
0.706927
439
0.779751
0
0
0
0
0
0
78
0.138544
60f2562d19bb7ab823ff8910d39c430258f1cd35
723
py
Python
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py
kaka-lin/autonomous-driving-notes
6c1b29752d6deb679637766b6cea5c6fe5b72319
[ "MIT" ]
null
null
null
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py
kaka-lin/autonomous-driving-notes
6c1b29752d6deb679637766b6cea5c6fe5b72319
[ "MIT" ]
null
null
null
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py
kaka-lin/autonomous-driving-notes
6c1b29752d6deb679637766b6cea5c6fe5b72319
[ "MIT" ]
null
null
null
import numpy as np
import matplotlib.pyplot as plt


def gaussian(x, mean, std):
    std2 = np.power(std, 2)
    return (1 / np.sqrt(2 * np.pi * std2)) * np.exp(-.5 * (x - mean)**2 / std2)


if __name__ == "__main__":
    gauss_1 = gaussian(10, 8, 2)   # 0.12098536225957168
    gauss_2 = gaussian(10, 10, 2)  # 0.19947114020071635
    print("Gauss(10, 8, 2): {}".format(gauss_1))
    print("Gauss(10, 10, 2): {}".format(gauss_2))

    # Standard Gaussian distribution (original comment: 標準高斯分佈)
    mean = 0
    variance = 1
    std = np.sqrt(variance)

    # Plot between -10 and 10 with .001 steps.
    x = np.arange(-5, 5, 0.001)
    gauss = []
    for i in x:
        gauss.append(gaussian(i, mean, std))
    gauss = np.array(gauss)

    plt.plot(x, gauss)
    plt.show()
23.322581
78
0.580913
0
0
0
0
0
0
0
0
157
0.213605
60f26fe4816d83e373acca7a0999becfe86e2ce4
9,029
py
Python
part19/test_interpreter.py
fazillatheef/lsbasi
07e1a14516156a21ebe2d82e0bae4bba5ad73dd6
[ "MIT" ]
1,682
2015-06-15T11:42:03.000Z
2022-03-29T12:40:35.000Z
part19/test_interpreter.py
fazillatheef/lsbasi
07e1a14516156a21ebe2d82e0bae4bba5ad73dd6
[ "MIT" ]
10
2017-06-22T11:35:21.000Z
2022-02-26T17:37:42.000Z
part19/test_interpreter.py
fazillatheef/lsbasi
07e1a14516156a21ebe2d82e0bae4bba5ad73dd6
[ "MIT" ]
493
2015-07-05T09:05:09.000Z
2022-03-28T03:33:33.000Z
import unittest class LexerTestCase(unittest.TestCase): def makeLexer(self, text): from spi import Lexer lexer = Lexer(text) return lexer def test_tokens(self): from spi import TokenType records = ( ('234', TokenType.INTEGER_CONST, 234), ('3.14', TokenType.REAL_CONST, 3.14), ('*', TokenType.MUL, '*'), ('DIV', TokenType.INTEGER_DIV, 'DIV'), ('/', TokenType.FLOAT_DIV, '/'), ('+', TokenType.PLUS, '+'), ('-', TokenType.MINUS, '-'), ('(', TokenType.LPAREN, '('), (')', TokenType.RPAREN, ')'), (':=', TokenType.ASSIGN, ':='), ('.', TokenType.DOT, '.'), ('number', TokenType.ID, 'number'), (';', TokenType.SEMI, ';'), ('BEGIN', TokenType.BEGIN, 'BEGIN'), ('END', TokenType.END, 'END'), ('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'), ) for text, tok_type, tok_val in records: lexer = self.makeLexer(text) token = lexer.get_next_token() self.assertEqual(token.type, tok_type) self.assertEqual(token.value, tok_val) def test_lexer_exception(self): from spi import LexerError lexer = self.makeLexer('<') with self.assertRaises(LexerError): lexer.get_next_token() class ParserTestCase(unittest.TestCase): def makeParser(self, text): from spi import Lexer, Parser lexer = Lexer(text) parser = Parser(lexer) return parser def test_expression_invalid_syntax_01(self): from spi import ParserError, ErrorCode parser = self.makeParser( """ PROGRAM Test; VAR a : INTEGER; BEGIN a := 10 * ; {Invalid syntax} END. """ ) with self.assertRaises(ParserError) as cm: parser.parse() the_exception = cm.exception self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN) self.assertEqual(the_exception.token.value, ';') self.assertEqual(the_exception.token.lineno, 6) def test_expression_invalid_syntax_02(self): from spi import ParserError, ErrorCode parser = self.makeParser( """ PROGRAM Test; VAR a : INTEGER; BEGIN a := 1 (1 + 2); {Invalid syntax} END. """ ) with self.assertRaises(ParserError) as cm: parser.parse() the_exception = cm.exception self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN) self.assertEqual(the_exception.token.value, '(') self.assertEqual(the_exception.token.lineno, 6) def test_maximum_one_VAR_block_is_allowed(self): from spi import ParserError, ErrorCode # zero VARs parser = self.makeParser( """ PROGRAM Test; BEGIN END. """ ) parser.parse() # one VAR parser = self.makeParser( """ PROGRAM Test; VAR a : INTEGER; BEGIN END. """ ) parser.parse() parser = self.makeParser( """ PROGRAM Test; VAR a : INTEGER; VAR b : INTEGER; BEGIN a := 5; b := a + 10; END. """ ) with self.assertRaises(ParserError) as cm: parser.parse() the_exception = cm.exception self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN) self.assertEqual(the_exception.token.value, 'VAR') self.assertEqual(the_exception.token.lineno, 5) # second VAR class SemanticAnalyzerTestCase(unittest.TestCase): def runSemanticAnalyzer(self, text): from spi import Lexer, Parser, SemanticAnalyzer lexer = Lexer(text) parser = Parser(lexer) tree = parser.parse() semantic_analyzer = SemanticAnalyzer() semantic_analyzer.visit(tree) return semantic_analyzer def test_semantic_duplicate_id_error(self): from spi import SemanticError, ErrorCode with self.assertRaises(SemanticError) as cm: self.runSemanticAnalyzer( """ PROGRAM Test; VAR a : INTEGER; a : REAL; {Duplicate identifier} BEGIN a := 5; END. 
""" ) the_exception = cm.exception self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID) self.assertEqual(the_exception.token.value, 'a') self.assertEqual(the_exception.token.lineno, 5) def test_semantic_id_not_found_error(self): from spi import SemanticError, ErrorCode with self.assertRaises(SemanticError) as cm: self.runSemanticAnalyzer( """ PROGRAM Test; VAR a : INTEGER; BEGIN a := 5 + b; END. """ ) the_exception = cm.exception self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND) self.assertEqual(the_exception.token.value, 'b') class TestCallStack: def __init__(self): self._records = [] def push(self, ar): self._records.append(ar) def pop(self): # do nothing pass def peek(self): return self._records[-1] class InterpreterTestCase(unittest.TestCase): def makeInterpreter(self, text): from spi import Lexer, Parser, SemanticAnalyzer, Interpreter lexer = Lexer(text) parser = Parser(lexer) tree = parser.parse() semantic_analyzer = SemanticAnalyzer() semantic_analyzer.visit(tree) interpreter = Interpreter(tree) interpreter.call_stack = TestCallStack() return interpreter def test_integer_arithmetic_expressions(self): for expr, result in ( ('3', 3), ('2 + 7 * 4', 30), ('7 - 8 DIV 4', 5), ('14 + 2 * 3 - 6 DIV 2', 17), ('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22), ('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10), ('7 + (((3 + 2)))', 12), ('- 3', -3), ('+ 3', 3), ('5 - - - + - 3', 8), ('5 - - - + - (3 + 4) - +2', 10), ): interpreter = self.makeInterpreter( """PROGRAM Test; VAR a : INTEGER; BEGIN a := %s END. """ % expr ) interpreter.interpret() ar = interpreter.call_stack.peek() self.assertEqual(ar['a'], result) def test_float_arithmetic_expressions(self): for expr, result in ( ('3.14', 3.14), ('2.14 + 7 * 4', 30.14), ('7.14 - 8 / 4', 5.14), ): interpreter = self.makeInterpreter( """PROGRAM Test; VAR a : REAL; BEGIN a := %s END. """ % expr ) interpreter.interpret() ar = interpreter.call_stack.peek() self.assertEqual(ar['a'], result) def test_procedure_call(self): text = """\ program Main; procedure Alpha(a : integer; b : integer); var x : integer; begin x := (a + b ) * 2; end; begin { Main } Alpha(3 + 5, 7); end. { Main } """ interpreter = self.makeInterpreter(text) interpreter.interpret() ar = interpreter.call_stack.peek() self.assertEqual(ar['a'], 8) self.assertEqual(ar['b'], 7) self.assertEqual(ar['x'], 30) self.assertEqual(ar.nesting_level, 2) def test_program(self): text = """\ PROGRAM Part12; VAR number : INTEGER; a, b : INTEGER; y : REAL; PROCEDURE P1; VAR a : REAL; k : INTEGER; PROCEDURE P2; VAR a, z : INTEGER; BEGIN {P2} z := 777; END; {P2} BEGIN {P1} END; {P1} BEGIN {Part12} number := 2; a := number ; b := 10 * a + 10 * number DIV 4; y := 20 / 7 + 3.14 END. {Part12} """ interpreter = self.makeInterpreter(text) interpreter.interpret() ar = interpreter.call_stack.peek() self.assertEqual(len(ar.members.keys()), 4) self.assertEqual(ar['number'], 2) self.assertEqual(ar['a'], 2) self.assertEqual(ar['b'], 25) self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971... if __name__ == '__main__': unittest.main()
27.95356
84
0.507144
8,949
0.99114
0
0
0
0
0
0
2,517
0.278768
60f465712817804b710d37a55f88faebafc8ed3c
1,675
py
Python
bot_components/configurator.py
Ferlern/Arctic-Tundra
407b8c38c31f6c930df662e87ced527b9fd26c61
[ "MIT" ]
3
2021-11-05T20:22:05.000Z
2022-02-14T12:12:31.000Z
bot_components/configurator.py
Ferlern/Arctic-Tundra
407b8c38c31f6c930df662e87ced527b9fd26c61
[ "MIT" ]
null
null
null
bot_components/configurator.py
Ferlern/Arctic-Tundra
407b8c38c31f6c930df662e87ced527b9fd26c61
[ "MIT" ]
null
null
null
import json
from typing import TypedDict

from .bot_emoji import AdditionalEmoji


class Warn(TypedDict):
    text: str
    mute_time: int
    ban: bool


class PersonalVoice(TypedDict):
    categoty: int
    price: int
    slot_price: int
    bitrate_price: int


class System(TypedDict):
    token: str
    initial_extensions: list[str]


class ExperienceSystem(TypedDict):
    experience_channel: int
    cooldown: int
    minimal_message_length: int
    experience_per_message: list[int]
    roles: dict[str, int]
    coins_per_level_up: int


class AutoTranslation(TypedDict):
    channels: list
    lang: str


class Config(TypedDict):
    guild: int
    token: str
    prefixes: list[str]
    commands_channels: list[int]
    mute_role: int
    suggestions_channel: int
    moderators_roles: list[int]
    warns_system: list[Warn]
    coin: str
    daily: int
    marry_price: int
    personal_voice: PersonalVoice
    experience_system: ExperienceSystem
    auto_translation: AutoTranslation
    additional_emoji: AdditionalEmoji


class Configurator:
    def __init__(self) -> None:
        self.system: System
        self.config: Config

    def dump(self):
        with open("./bot_components/config.json", "w") as write_file:
            to_dump = [self.system, self.config]
            json.dump(to_dump, write_file, indent=4)

    def load(self):
        with open("./bot_components/config.json", "r") as write_file:
            data = json.load(write_file)
            self.system = System(data[0])
            self.config = Config(data[1])

    def reload(self):
        self.dump()
        self.load()


configurator = Configurator()
configurator.load()
21.202532
69
0.666269
1,518
0.906269
0
0
0
0
0
0
66
0.039403
60f6336cf78fe6b4d87ec03f5cf82a49ade1394b
3,493
py
Python
recnn/utils/plot.py
ihash5/reinforcement-learning
c72e9db33c6ed6abd34e9f48012189369b7cd5d0
[ "Apache-2.0" ]
1
2021-04-10T08:21:21.000Z
2021-04-10T08:21:21.000Z
recnn/utils/plot.py
ihash5/reinforcement-learning
c72e9db33c6ed6abd34e9f48012189369b7cd5d0
[ "Apache-2.0" ]
null
null
null
recnn/utils/plot.py
ihash5/reinforcement-learning
c72e9db33c6ed6abd34e9f48012189369b7cd5d0
[ "Apache-2.0" ]
null
null
null
from scipy.spatial import distance from scipy import ndimage import matplotlib.pyplot as plt import torch from scipy import stats import numpy as np def pairwise_distances_fig(embs): embs = embs.detach().cpu().numpy() similarity_matrix_cos = distance.cdist(embs, embs, 'cosine') similarity_matrix_euc = distance.cdist(embs, embs, 'euclidean') fig = plt.figure(figsize=(16,10)) ax = fig.add_subplot(121) cax = ax.matshow(similarity_matrix_cos) fig.colorbar(cax) ax.set_title('Cosine') ax.axis('off') ax = fig.add_subplot(122) cax = ax.matshow(similarity_matrix_euc) fig.colorbar(cax) ax.set_title('Euclidian') ax.axis('off') fig.suptitle('Action pairwise distances') plt.close() return fig def pairwise_distances(embs): fig = pairwise_distances_fig(embs) fig.show() def smooth(scalars, weight): # Weight between 0 and 1 last = scalars[0] # First value in the plot (first timestep) smoothed = list() for point in scalars: smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value smoothed.append(smoothed_val) # Save it last = smoothed_val # Anchor the last smoothed value return smoothed def smooth_gauss(arr, var): return ndimage.gaussian_filter1d(arr, var) class Plotter: def __init__(self, loss, style): self.loss = loss self.style = style self.smoothing = lambda x: smooth_gauss(x, 4) def set_smoothing_func(self, f): self.smoothing = f def plot_loss(self): for row in self.style: fig, axes = plt.subplots(1, len(row), figsize=(16, 6)) if len(row) == 1: axes = [axes] for col in range(len(row)): key = row[col] axes[col].set_title(key) axes[col].plot(self.loss['train']['step'], self.smoothing(self.loss['train'][key]), 'b-', label='train') axes[col].plot(self.loss['test']['step'], self.loss['test'][key], 'r-.', label='test') plt.legend() plt.show() def log_loss(self, key, item, test=False): kind = 'train' if test: kind = 'test' self.loss[kind][key].append(item) def log_losses(self, losses, test=False): for key, val in losses.items(): self.log_loss(key, val, test) @staticmethod def kde_reconstruction_error(ad, gen_actions, true_actions, device=torch.device('cpu')): def rec_score(actions): return ad.rec_error(torch.tensor(actions).to(device).float()).detach().cpu().numpy() true_scores = rec_score(true_actions) gen_scores = rec_score(gen_actions) true_kernel = stats.gaussian_kde(true_scores) gen_kernel = stats.gaussian_kde(gen_scores) x = np.linspace(0, 1000, 100) probs_true = true_kernel(x) probs_gen = gen_kernel(x) fig = plt.figure(figsize=(16, 10)) ax = fig.add_subplot(111) ax.plot(x, probs_true, '-b', label='true dist') ax.plot(x, probs_gen, '-r', label='generated dist') ax.legend() return fig @staticmethod def plot_kde_reconstruction_error(*args, **kwargs): fig = Plotter.kde_reconstruction_error(*args, **kwargs) fig.show()
30.373913
96
0.590896
2,118
0.606356
0
0
927
0.265388
0
0
321
0.091898
60f7e54acc60354d75596811ff04f18911fc24eb
6,362
py
Python
tests/integration/insights/v1/call/test_metric.py
pazzy-stack/twilio
d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0
[ "MIT" ]
null
null
null
tests/integration/insights/v1/call/test_metric.py
pazzy-stack/twilio
d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0
[ "MIT" ]
null
null
null
tests/integration/insights/v1/call/test_metric.py
pazzy-stack/twilio
d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0
[ "MIT" ]
null
null
null
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from tests import IntegrationTestCase from tests.holodeck import Request from twilio.base.exceptions import TwilioException from twilio.http.response import Response class MetricTestCase(IntegrationTestCase): def test_list_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .metrics.list() self.holodeck.assert_has_request(Request( 'get', 'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics', )) def test_read_response(self): self.holodeck.mock(Response( 200, ''' { "meta": { "page": 0, "page_size": 50, "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0", "previous_page_url": null, "next_page_url": null, "key": "metrics", "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0" }, "metrics": [ { "timestamp": "2019-10-07T22:32:06Z", "call_sid": "CA7569efe0253644fa4a88aa97beca3310", "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9", "edge": "sdk_edge", "direction": "both", "sdk_edge": { "interval": { "packets_received": 50, "packets_lost": 0, "audio_in": { "value": 81.0 }, "audio_out": { "value": 5237.0 }, "jitter": { "value": 9 }, "mos": { "value": 4.39 }, "rtt": { "value": 81 } }, "cumulative": { "bytes_received": 547788, "bytes_sent": 329425, "packets_received": 3900, "packets_lost": 0, "packets_sent": 3934 } }, "client_edge": null, "carrier_edge": null, "sip_edge": null, "gateway": null, "client": null } ] } ''' )) actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .metrics.list() self.assertIsNotNone(actual) def test_read_full_response(self): self.holodeck.mock(Response( 200, ''' { "meta": { "page": 10, "page_size": 5, "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0", "previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10", "next_page_url": null, "key": "metrics", "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10" }, "metrics": [ { "timestamp": "2019-10-07T22:32:06Z", "call_sid": "CA7569efe0253644fa4a88aa97beca3310", "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9", "edge": "sdk_edge", "direction": "both", "sdk_edge": { "interval": { "packets_received": 50, "packets_lost": 0, "audio_in": { "value": 81.0 }, "audio_out": { "value": 5237.0 }, "jitter": { "value": 9 }, "mos": { "value": 4.39 }, "rtt": { "value": 81 } }, "cumulative": { "bytes_received": 547788, "bytes_sent": 329425, "packets_received": 3900, "packets_lost": 0, "packets_sent": 3934 } }, "client_edge": null, "carrier_edge": null, "sip_edge": null, "gateway": null, "client": null } ] } ''' )) actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .metrics.list() self.assertIsNotNone(actual)
40.265823
185
0.365923
6,072
0.954417
0
0
0
0
0
0
5,298
0.832757
60f8300c3b1d0bfc0e3ab0efad7d54c27160ef0c
1,053
py
Python
2017-2018/lecture-notes/python/02-algorithms_listing_8_contains_word.py
essepuntato/comp-think
3dac317bda0eb7650adc4a92c1ccb8a4ce87a3a6
[ "BSD-2-Clause" ]
19
2017-07-03T11:55:33.000Z
2021-10-17T10:21:24.000Z
2017-2018/lecture-notes/python/02-algorithms_listing_8_contains_word.py
essepuntato/comp-think
3dac317bda0eb7650adc4a92c1ccb8a4ce87a3a6
[ "BSD-2-Clause" ]
1
2017-12-21T10:52:56.000Z
2018-06-06T13:59:13.000Z
2017-2018/lecture-notes/python/02-algorithms_listing_8_contains_word.py
essepuntato/comp-think
3dac317bda0eb7650adc4a92c1ccb8a4ce87a3a6
[ "BSD-2-Clause" ]
4
2017-11-13T09:29:06.000Z
2019-05-09T03:29:49.000Z
def contains_word(first_word, second_word, bibliographic_entry):
    contains_first_word = first_word in bibliographic_entry
    contains_second_word = second_word in bibliographic_entry

    if contains_first_word and contains_second_word:
        return 2
    elif contains_first_word or contains_second_word:
        return 1
    else:
        return 0


if __name__ == "__main__":
    bibliographic_entry = "Peroni, S., Osborne, F., Di Iorio, A., Nuzzolese, A. G., Poggi, F., Vitali, F., " \
                          "Motta, E. (2017). Research Articles in Simplified HTML: a Web-first format for " \
                          "HTML-based scholarly articles. PeerJ Computer Science 3: e132. e2513. " \
                          "DOI: https://doi.org/10.7717/peerj-cs.132"

    print(contains_word("Peroni", "Osborne", bibliographic_entry))
    print(contains_word("Peroni", "Asprino", bibliographic_entry))
    print(contains_word("Reforgiato", "Osborne", bibliographic_entry))
    print(contains_word("Reforgiato", "Asprino", bibliographic_entry))
47.863636
110
0.674264
0
0
0
0
0
0
0
0
364
0.345679
60f86292339dc07295795b770971581f3a845840
2,855
py
Python
backend/user/scripter.py
ivaivalous/ivodb
e9b0969225fdb725d35a2ecfab21f87d1d9b2a00
[ "MIT" ]
null
null
null
backend/user/scripter.py
ivaivalous/ivodb
e9b0969225fdb725d35a2ecfab21f87d1d9b2a00
[ "MIT" ]
null
null
null
backend/user/scripter.py
ivaivalous/ivodb
e9b0969225fdb725d35a2ecfab21f87d1d9b2a00
[ "MIT" ]
null
null
null
#!/usr/bin/env python import responses from selenium import webdriver # This file contains/references the default JS # used to provide functions dealing with input/output SCRIPT_RUNNER = "runner.html" ENCODING = 'utf-8' PAGE_LOAD_TIMEOUT = 5 PAGE_LOAD_TIMEOUT_MS = PAGE_LOAD_TIMEOUT * 1000 capabilities = webdriver.DesiredCapabilities.PHANTOMJS capabilities["phantomjs.page.settings.resourceTimeout"] = PAGE_LOAD_TIMEOUT_MS capabilities["phantomjs.page.settings.loadImages"] = False SCRIPT_TEMPLATE = """ window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}}; window.method = requestData.method; window.headers = requestData.headers; window.data = requestData.data; window.params = requestData.params; window.logs = []; window.log = function(message) {{ window.logs.push({{ "time": (new Date).getTime(), "message": message }}) }}; """ GET_LOGS_SCRIPT = 'return window.logs;' class Scripter: def __init__(self): self.driver = webdriver.PhantomJS(desired_capabilities=capabilities) self.driver.implicitly_wait(PAGE_LOAD_TIMEOUT) self.driver.set_page_load_timeout(PAGE_LOAD_TIMEOUT) def run(self, request, script_body, input_params): self.driver.get(SCRIPT_RUNNER) self.driver.execute_script( Scripter.build_runner_script(request, input_params)) try: response = self.execute_user_script(script_body) logs = self.driver.execute_script(GET_LOGS_SCRIPT) return response.encode(ENCODING), logs except: return responses.get_invalid_request(), [] def execute_user_script(self, script_body): """Execute a user-contributed script.""" return self.driver.execute_script(script_body) @staticmethod def build_runner_script(request, input_params): # Build JS related to having access to input # and request data. return SCRIPT_TEMPLATE.format( request.method, Scripter.build_headers_map(request.headers), request.get_data().encode(ENCODING), Scripter.build_params_map(input_params.encode(ENCODING))) @staticmethod def build_params_map(input_params): # input_params looks like "test=aaa&test2=jjj" couples = input_params.split("&") params_map = {} for couple in couples: c = couple.split("=") key = c[0] value = c[1] if len(c) > 1 else "" params_map[key] = value return params_map @staticmethod def build_headers_map(headers): headers_map = {} for key, value in headers: if 'jwt=' in value: continue headers_map[key] = value.encode(ENCODING) return headers_map
31.032609
79
0.652539
1,879
0.658144
0
0
1,011
0.354116
0
0
827
0.289667
60f89950c5e281ca1dca03166764cafad747aec6
1,490
py
Python
bwtougu/api/names.py
luhouxiang/byrobot
e110e7865965a344d2b61cb925c959cee1387758
[ "Apache-2.0" ]
null
null
null
bwtougu/api/names.py
luhouxiang/byrobot
e110e7865965a344d2b61cb925c959cee1387758
[ "Apache-2.0" ]
null
null
null
bwtougu/api/names.py
luhouxiang/byrobot
e110e7865965a344d2b61cb925c959cee1387758
[ "Apache-2.0" ]
1
2018-09-28T08:59:38.000Z
2018-09-28T08:59:38.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

VALID_HISTORY_FIELDS = [
    'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
    'acc_net_value', 'discount_rate', 'unit_net_value', 'limit_up',
    'limit_down', 'open_interest', 'basis_spread', 'settlement',
    'prev_settlement'
]

VALID_GET_PRICE_FIELDS = [
    'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover',
    'TotalVolumeTraded', 'AccNetValue', 'UnitNetValue', 'DiscountRate',
    'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx',
    'LowLimitPx'
]

VALID_TENORS = [
    '0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y', '5Y', '6Y',
    '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y', '40Y', '50Y'
]

VALID_INSTRUMENT_TYPES = [
    'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB',
    'FenjiMu', 'Stock', 'Fund', 'Index'
]

VALID_XUEQIU_FIELDS = [
    'new_comments', 'total_comments', 'new_followers', 'total_followers',
    'sell_actions', 'buy_actions',
]

VALID_MARGIN_FIELDS = [
    'margin_balance', 'buy_on_margin_value', 'short_sell_quantity',
    'margin_repayment', 'short_balance_quantity', 'short_repayment_quantity',
    'short_balance', 'total_balance'
]

VALID_SHARE_FIELDS = [
    'total', 'circulation_a', 'management_circulation', 'non_circulation_a',
    'total_a'
]

VALID_TURNOVER_FIELDS = (
    'today', 'week', 'month', 'three_month', 'six_month', 'year',
    'current_year', 'total',
)
25.689655
94
0.619463
0
0
0
0
0
0
0
0
971
0.651678
60f8eaaad292345c3bccc32bf4de41d8a5ec6e07
9,830
py
Python
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
dveni/causal-text-embeddings
82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a
[ "MIT" ]
114
2019-05-31T03:54:05.000Z
2022-03-28T06:37:27.000Z
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
dveni/causal-text-embeddings
82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a
[ "MIT" ]
7
2019-08-12T01:35:22.000Z
2020-09-23T17:32:46.000Z
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
dveni/causal-text-embeddings
82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a
[ "MIT" ]
20
2019-06-03T05:33:10.000Z
2022-02-04T19:34:41.000Z
""" Simple pre-processing for PeerRead papers. Takes in JSON formatted data from ScienceParse and outputs a tfrecord Reference example: https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py """ import argparse import glob import os import random import io import json from dateutil.parser import parse as parse_date import tensorflow as tf import bert.tokenization as tokenization from PeerRead.ScienceParse.Paper import Paper from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features rng = random.Random(0) def process_json_paper(paper_json_filename, scienceparse_dir, tokenizer): paper = Paper.from_json(paper_json_filename) paper.SCIENCEPARSE = ScienceParseReader.read_science_parse(paper.ID, paper.TITLE, paper.ABSTRACT, scienceparse_dir) # tokenize PeerRead features try: title_tokens = tokenizer.tokenize(paper.TITLE) except ValueError: # missing titles are quite common sciparse print("Missing title for " + paper_json_filename) title_tokens = None abstract_tokens = tokenizer.tokenize(paper.ABSTRACT) text_features = {'title': title_tokens, 'abstract': abstract_tokens} context_features = {'authors': paper.AUTHORS, 'accepted': paper.ACCEPTED, 'name': paper.ID} # add hand crafted features from PeerRead pr_hand_features = get_PeerRead_hand_features(paper) context_features.update(pr_hand_features) return text_features, context_features def bert_process_sentence(example_tokens, max_seq_length, tokenizer): """ Tokenization and pre-processing of text as expected by Bert Parameters ---------- example_tokens max_seq_length tokenizer Returns ------- """ # Account for [CLS] and [SEP] with "- 2" if len(example_tokens) > max_seq_length - 2: example_tokens = example_tokens[0:(max_seq_length - 2)] # The convention in BERT for single sequences is: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. (vv: Not relevant for us) # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. # vv: segment_ids seem to be the same as type_ids tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in example_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length return input_ids, input_mask, segment_ids def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer): """ Parses the input paper into a tf.Example as expected by Bert Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯ """ abstract_features = {} abstract_tokens, abstract_padding_mask, _ = \ bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer) abstract_features["token_ids"] = _int64_feature(abstract_tokens) abstract_features["token_mask"] = _int64_feature(abstract_padding_mask) # abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: ommission may cause bugs # abstract_features["label_ids"] = _int64_feature([feature.label_id]) # non-sequential features tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features) features = {**tf_context_features, **abstract_features} tf_example = tf.train.Example(features=tf.train.Features(feature=features)) return tf_example def _int64_feature(value): """Wrapper for inserting an int64 Feature into a SequenceExample proto, e.g, An integer label. """ if isinstance(value, list): return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) else: return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float_feature(value): """Wrapper for inserting a float Feature into a SequenceExample proto, e.g, An integer label. """ if isinstance(value, list): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) else: return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def _bytes_feature(value): """Wrapper for inserting a bytes Feature into a SequenceExample proto, e.g, an image in byte """ # return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _dict_of_nonlist_numerical_to_tf_features(my_dict): """ Strip out non-numerical features Returns tf_features_dict: a dictionary suitable for passing to tf.train.example tf_types_dict: a dictionary of the tf types of previous dict """ tf_types_dict = {} tf_features_dict = {} for k, v in my_dict.items(): if isinstance(v, int) or isinstance(v, bool): tf_features_dict[k] = _int64_feature(v) tf_types_dict[k] = tf.int64 elif isinstance(v, float): tf_features_dict[k] = _float_feature(v) tf_types_dict[k] = tf.float32 else: pass return tf_features_dict, tf_types_dict venues = {'acl': 1, 'conll': 2, 'iclr': 3, 'nips': 4, 'icml': 5, 'emnlp': 6, 'aaai': 7, 'hlt-naacl': 8, 'arxiv': 0} def _venues(venue_name): if venue_name.lower() in venues: return venues[venue_name.lower()] else: return -1 def _arxiv_subject(subjects): subject = subjects[0] if 'lg' in subject.lower(): return 0 elif 'cl' in subject.lower(): return 1 elif 'ai' in subject.lower(): return 2 else: raise Exception("arxiv subject not recognized") def clean_PeerRead_dataset(review_json_dir, parsedpdf_json_dir, venue, year, out_dir, out_file, max_abs_len, tokenizer, default_accept=1, is_arxiv = False): if not os.path.exists(out_dir): os.makedirs(out_dir) print('Reading reviews from...', review_json_dir) paper_json_filenames = sorted(glob.glob('{}/*.json'.format(review_json_dir))) with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer: for idx, paper_json_filename in 
enumerate(paper_json_filenames): text_features, context_features = process_json_paper(paper_json_filename, parsedpdf_json_dir, tokenizer) if context_features['accepted'] is None: # missing for conferences other than ICLR (we only see accepts) context_features['accepted'] = default_accept many_split = rng.randint(0, 100) # useful for easy data splitting later # other context features arxiv = -1 if is_arxiv: with io.open(paper_json_filename) as json_file: loaded = json.load(json_file) year = parse_date(loaded['DATE_OF_SUBMISSION']).year venue = _venues(loaded['conference']) arxiv = _arxiv_subject([loaded['SUBJECTS']]) extra_context = {'id': idx, 'venue': venue, 'year': year, 'many_split': many_split, 'arxiv': arxiv} context_features.update(extra_context) # turn it into a tf.data example paper_ex = paper_to_bert_Example(text_features, context_features, max_seq_length=max_abs_len, tokenizer=tokenizer) writer.write(paper_ex.SerializeToString()) def main(): parser = argparse.ArgumentParser() parser.add_argument('--review-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/reviews') parser.add_argument('--parsedpdf-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/parsed_pdfs') parser.add_argument('--out-dir', type=str, default='../dat/PeerRead/proc') parser.add_argument('--out-file', type=str, default='arxiv-all.tf_record') parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt') parser.add_argument('--max-abs-len', type=int, default=250) parser.add_argument('--venue', type=int, default=0) parser.add_argument('--year', type=int, default=2017) args = parser.parse_args() tokenizer = tokenization.FullTokenizer( vocab_file=args.vocab_file, do_lower_case=True) clean_PeerRead_dataset(args.review_json_dir, args.parsedpdf_json_dir, args.venue, args.year, args.out_dir, args.out_file, args.max_abs_len, tokenizer, is_arxiv=True) if __name__ == "__main__": main()
33.896552
132
0.662665
0
0
0
0
0
0
0
0
2,975
0.302522
60f9b711d1be6899f182566e81c346882c7e82b3
19
py
Python
app/packageB/__init__.py
An7ar35/python-app-skeleton-structure
9060411bd32840c6510ad8fe18dcdc097c07b511
[ "MIT" ]
null
null
null
app/packageB/__init__.py
An7ar35/python-app-skeleton-structure
9060411bd32840c6510ad8fe18dcdc097c07b511
[ "MIT" ]
null
null
null
app/packageB/__init__.py
An7ar35/python-app-skeleton-structure
9060411bd32840c6510ad8fe18dcdc097c07b511
[ "MIT" ]
null
null
null
__all__=['module1']
19
19
0.736842
0
0
0
0
0
0
0
0
9
0.473684
60fad164495b1d30558324f3422b6ab9ad4d496c
5,268
py
Python
lib/shop.py
ZakDoesGaming/OregonTrail
90cab35536ac5c6ba9e772ac5c29c914017c9c23
[ "MIT" ]
6
2018-05-07T04:04:58.000Z
2021-05-15T17:44:16.000Z
lib/shop.py
ZakDoesGaming/OregonTrail
90cab35536ac5c6ba9e772ac5c29c914017c9c23
[ "MIT" ]
null
null
null
lib/shop.py
ZakDoesGaming/OregonTrail
90cab35536ac5c6ba9e772ac5c29c914017c9c23
[ "MIT" ]
2
2017-05-27T17:06:23.000Z
2020-08-26T17:57:10.000Z
from pygame import Surface, font
from copy import copy
from random import randint, choice
import string

from lib.transactionButton import TransactionButton

SHOP_PREFIX = ["archer", "baker", "fisher", "miller", "rancher", "robber"]
SHOP_SUFFIX = ["cave", "creek", "desert", "farm", "field", "forest", "hill", "lake", "mountain", "pass", "valley", "woods"]


class Shop():
    def __init__(self, name, inventory, priceModifier, groupInventory, groupMoney, itemPrices, position, blitPosition, money, resourcePath):
        self.yValue = 40
        self.groupInventory = groupInventory
        self.groupMoney = groupMoney
        self.priceModifier = priceModifier
        self.itemPrices = itemPrices
        self.inventory = inventory
        self.position = position
        self.blitPosition = blitPosition
        self.resourcePath = resourcePath
        self.buyButtonList = []
        self.sellButtonList = []
        self.xPos = (-self.position * 40) + 1280

        self.shopSurface = Surface((500, 300)).convert()
        self.sepLine = Surface((self.shopSurface.get_width(), 10)).convert()
        self.sepLine.fill((0, 0, 0))
        self.invContainer = Surface((self.shopSurface.get_width() - 20, self.shopSurface.get_height() / 2 - 35)).convert()
        self.invContainer.fill((255, 255, 255))
        self.titleFont = font.Font("res/fonts/west.ttf", 17)
        self.textFont = font.Font("res/fonts/west.ttf", 15)

        # Generate a random "<prefix>'s <suffix>" shop name when none is given.
        if (name == ""):
            self.name = (choice(SHOP_PREFIX) + "'s " + choice(SHOP_SUFFIX)).capitalize()
        else:
            self.name = name
        # Generate a random stock when no inventory is given.
        if (self.inventory == {}):
            inventoryRandom = copy(self.groupInventory)
            for key in list(inventoryRandom.keys()):
                inventoryRandom[key] = randint(0, 10)
            inventoryRandom["Food"] *= 20
            self.inventory = inventoryRandom
        # Use the given till amount, or roll a random one when none is given.
        if (money is None):
            self.money = randint(200, 500)
        else:
            self.money = money
        self.render()

    def get_surface(self):
        self.render()
        return self.shopSurface

    def update(self, groupInv, groupMoney):
        self.groupInventory = groupInv
        self.groupMoney = groupMoney
        self.render()

    def move(self, moveValue):
        self.xPos += (2 * moveValue)
        self.render()

    def render(self):
        self.yValue = 40
        self.shopSurface.fill((133, 94, 66))
        self.shopSurface.blit(self.titleFont.render(self.name + " - $" + str(self.money), 1, (0, 0, 255)), (10, 5))
        self.shopSurface.blit(self.invContainer, (10, 25))
        self.shopSurface.blit(self.invContainer, (10, self.shopSurface.get_height() / 2 + 30))
        self.shopSurface.blit(self.textFont.render("Inventory", 1, (255, 0, 0)), (10, 25))
        self.shopSurface.blit(self.textFont.render("Amount", 1, (255, 0, 0)), (130, 25))
        self.shopSurface.blit(self.textFont.render("Price", 1, (255, 0, 0)), (200, 25))
        # Shop inventory section: one row per item plus a buy button.
        for key in list(self.inventory.keys()):
            self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
            self.shopSurface.blit(self.textFont.render(str(self.inventory[key]), 1, (0, 0, 0)), (150, self.yValue))
            self.shopSurface.blit(self.textFont.render("$"+str(self.itemPrices[key] * self.priceModifier), 1, (0, 0, 0)), (200, self.yValue))
            if (len(self.buyButtonList) < len(self.inventory.keys())):
                buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
                self.buyButtonList.append(TransactionButton(transaction = "buy", item = key, imagePosition = (250, self.yValue), rectPosition = buttonPos, resourcePath = self.resourcePath))
            self.yValue += 30
        for button in self.buyButtonList:
            self.shopSurface.blit(button.image, button.imagePosition)
        self.shopSurface.blit(self.sepLine, (0, float(self.shopSurface.get_height()) / 2))
        self.shopSurface.blit(self.titleFont.render("You - $" + str(self.groupMoney), 1, (0, 0, 255)), (10, float(self.shopSurface.get_height()) / 2 + 10))
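        # Player ("You") inventory section: column headers, item rows, and sell buttons.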
self.shopSurface.blit(self.titleFont.render("Inventory", 1, (255, 0, 0)), (10, float(self.shopSurface.get_height()) / 2 + 30)) self.shopSurface.blit(self.titleFont.render("Amount", 1, (255, 0, 0)), (130, float(self.shopSurface.get_height()) / 2 + 30)) self.shopSurface.blit(self.titleFont.render("Price", 1, (255, 0, 0)), (200, float(self.shopSurface.get_height()) / 2 + 30)) self.yValue = (float(self.shopSurface.get_height()) / 2) + 45 for key in list(self.groupInventory.keys()): self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue)) self.shopSurface.blit(self.textFont.render(str(self.groupInventory[key]), 1, (0, 0, 0)), (150, self.yValue)) self.shopSurface.blit(self.textFont.render("$" + str(self.itemPrices[key] * self.priceModifier), 1, (0, 0, 0)), (200, self.yValue)) if (len(self.sellButtonList) < len(self.inventory.keys())): buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue)))) self.sellButtonList.append(TransactionButton(transaction = "sell", item = key, imagePosition = (250, self.yValue), rectPosition = buttonPos, resourcePath = self.resourcePath)) self.yValue += 30 for button in self.sellButtonList: self.shopSurface.blit(button.image, button.imagePosition)
41.480315
137
0.650721
4,882
0.926727
0
0
0
0
0
0
276
0.052392
60fb27a39e6c08f8aae7d5554b69bcd58cf5b1d9
2,047
py
Python
core/dataflow/test/test_runners.py
ajmal017/amp
8de7e3b88be87605ec3bad03c139ac64eb460e5c
[ "BSD-3-Clause" ]
null
null
null
core/dataflow/test/test_runners.py
ajmal017/amp
8de7e3b88be87605ec3bad03c139ac64eb460e5c
[ "BSD-3-Clause" ]
null
null
null
core/dataflow/test/test_runners.py
ajmal017/amp
8de7e3b88be87605ec3bad03c139ac64eb460e5c
[ "BSD-3-Clause" ]
null
null
null
import logging import numpy as np import core.dataflow as dtf import helpers.unit_test as hut _LOG = logging.getLogger(__name__) class TestRollingFitPredictDagRunner(hut.TestCase): def test1(self) -> None: """ Test the DagRunner using `ArmaReturnsBuilder` """ dag_builder = dtf.ArmaReturnsBuilder() config = dag_builder.get_config_template() dag_builder.get_dag(config) # dag_runner = dtf.RollingFitPredictDagRunner( config=config, dag_builder=dag_builder, start="2010-01-04 09:30", end="2010-01-04 15:30", retraining_freq="H", retraining_lookback=4, ) result_bundles = list(dag_runner.fit_predict()) np.testing.assert_equal(len(result_bundles), 2) class TestIncrementalDagRunner(hut.TestCase): def test1(self) -> None: """ Test the DagRunner using `ArmaReturnsBuilder` """ dag_builder = dtf.ArmaReturnsBuilder() config = dag_builder.get_config_template() # Create DAG and generate fit state. dag = dag_builder.get_dag(config) dag.run_leq_node("rets/clip", "fit") fit_state = dtf.get_fit_state(dag) # dag_runner = dtf.IncrementalDagRunner( config=config, dag_builder=dag_builder, start="2010-01-04 15:30", end="2010-01-04 15:45", freq="5T", fit_state=fit_state, ) result_bundles = list(dag_runner.predict()) self.assertEqual(len(result_bundles), 4) # Check that dataframe results of `col` do not retroactively change # over successive prediction steps (which would suggest future # peeking). col = "vwap_ret_0_vol_2_hat" for rb_i, rb_i_next in zip(result_bundles[:-1], result_bundles[1:]): srs_i = rb_i.result_df[col] srs_i_next = rb_i_next.result_df[col] self.assertTrue(srs_i.compare(srs_i_next[:-1]).empty)
31.984375
76
0.616512
1,909
0.932584
0
0
0
0
0
0
433
0.211529
60fb78d100400013bb9e1879a2d59065d01b4f6b
4,478
py
Python
Main Project/Main_Program.py
hmnk-1967/OCR-Python-Project-CS-BUIC
28c72d9913a25655f6183a7b960e527a0432c8e1
[ "MIT" ]
null
null
null
Main Project/Main_Program.py
hmnk-1967/OCR-Python-Project-CS-BUIC
28c72d9913a25655f6183a7b960e527a0432c8e1
[ "MIT" ]
null
null
null
Main Project/Main_Program.py
hmnk-1967/OCR-Python-Project-CS-BUIC
28c72d9913a25655f6183a7b960e527a0432c8e1
[ "MIT" ]
null
null
null
import tkinter.messagebox from tkinter import * import tkinter as tk from tkinter import filedialog import numpy import pytesseract #Python wrapper for Google-owned OCR engine known by the name of Tesseract. import cv2 from PIL import Image, ImageTk import os root = tk.Tk() root.title("Object Character Recognizer") root.geometry("1280x720") test_image = None def browse_image(): fin = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select Image File", filetypes=(("PNG Files", "*.png"), ("JPG Files", "*.jpg"), ("All Files", "*.*"))) global test_image image = Image.open(fin) test_image = image img = ImageTk.PhotoImage(image.resize((650, 400))) lb = tk.Label(image=img) lb.place(x=25, y=50) root.mainloop() def use_ocr_default(): try: global test_image messge = None #OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode. #OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match #the available data with the testing data). #PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the #data from the image. tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 3') label = Label(messge, text='Result:') label.place(x=850, y=320) display_message = Text(messge, width=46, height=15) display_message.insert(END, str(tess)) display_message.config(state=DISABLED) display_message.delete(0, END) display_message.place(x=890, y=330) except: #Print a error message when the user inputs an incompatible image. tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.') def use_ocr_handwriting(): try: global test_image opencv_img = numpy.array(test_image) opencv_img = opencv_img[:, :, ::-1].copy() #This line is used to convert RGB PIL image file to BGR cv2 image file. blurred_img = cv2.medianBlur(opencv_img, 5) gray_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2GRAY) thresh, binary = cv2.threshold(gray_img, 122, 255, cv2.THRESH_BINARY) messge = None tess = pytesseract.image_to_string(binary, config='-l eng --oem 1 --psm 3') label = Label(messge, text='Result:') label.place(x=850, y=320) display_message = Text(messge, width=46, height=15) display_message.insert(END, str(tess)) display_message.config(state=DISABLED) display_message.delete(0, END) display_message.place(x=890, y=330) except: tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.') def use_ocr_singletext(): try: global test_image messge = None tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 7') label = Label(messge, text='Result:') label.place(x=850, y=320) display_message = Text(messge, width=46, height=15) display_message.insert(END, str(tess)) display_message.config(state=DISABLED) display_message.delete(0, END) display_message.place(x=890, y=330) except: tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. 
Please select a picture with detectable English characters.') w = tk.LabelFrame(root, text="Image:", width=768, height=600) w.place(x=20, y=10) w.pack_propagate(0) w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310) w1.place(x=800, y=300) w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280) w2.place(x=800, y=10) btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image) btn1.place(x=22, y=20) btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting) btn1.place(x=22, y=80) btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default) btn1.place(x=22, y=140) btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext) btn1.place(x=22, y=200) root.mainloop()
44.336634
209
0.692497
0
0
0
0
0
0
0
0
1,399
0.312416
60fbe7657269c996fc3948f64ef4c1f61caa8318
881
bzl
Python
third_party/nasm/workspace.bzl
wainshine/tensorflow
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
[ "Apache-2.0" ]
54
2017-06-17T14:07:48.000Z
2022-03-29T02:11:20.000Z
third_party/nasm/workspace.bzl
wainshine/tensorflow
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
[ "Apache-2.0" ]
19
2021-12-28T12:44:55.000Z
2022-01-13T08:11:28.000Z
third_party/nasm/workspace.bzl
wainshine/tensorflow
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
[ "Apache-2.0" ]
11
2018-04-19T22:36:01.000Z
2021-08-02T08:44:43.000Z
"""loads the nasm library, used by TF.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): tf_http_archive( name = "nasm", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2", "http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.13.03.tar.bz2/sha512/d7a6b4cee8dfd603d8d4c976e5287b5cc542fa0b466ff989b743276a6e28114e64289bf02a7819eca63142a5278aa6eed57773007e5f589e15768e6456a8919d/nasm-2.13.03.tar.bz2", "http://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2", ], sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011", strip_prefix = "nasm-2.13.03", build_file = "//third_party/nasm:nasm.BUILD", system_build_file = "//third_party/nasm:BUILD.system", )
48.944444
237
0.694665
0
0
0
0
0
0
0
0
646
0.733258
60fc4c563a78f035d637363b6ec3e80079aa3d28
10,095
py
Python
python/tests/test-1-vector.py
wence-/libCEED
c785ad36304ed34c5edefb75cf1a0fe5445db17b
[ "BSD-2-Clause" ]
null
null
null
python/tests/test-1-vector.py
wence-/libCEED
c785ad36304ed34c5edefb75cf1a0fe5445db17b
[ "BSD-2-Clause" ]
null
null
null
python/tests/test-1-vector.py
wence-/libCEED
c785ad36304ed34c5edefb75cf1a0fe5445db17b
[ "BSD-2-Clause" ]
null
null
null
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at # the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights # reserved. See files LICENSE and NOTICE for details. # # This file is part of CEED, a collection of benchmarks, miniapps, software # libraries and APIs for efficient high-order finite element and spectral # element discretizations for exascale applications. For more information and # source code availability see http://github.com/ceed. # # The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, # a collaborative effort of two U.S. Department of Energy organizations (Office # of Science and the National Nuclear Security Administration) responsible for # the planning and preparation of a capable exascale ecosystem, including # software, applications, hardware, advanced system engineering and early # testbed platforms, in support of the nation's exascale computing imperative. # @file # Test Ceed Vector functionality import os import libceed import numpy as np import check TOL = libceed.EPSILON * 256 # ------------------------------------------------------------------------------- # Utility # ------------------------------------------------------------------------------- def check_values(ceed, x, value): with x.array_read() as b: for i in range(len(b)): assert b[i] == value # ------------------------------------------------------------------------------- # Test creation, setting, reading, restoring, and destroying of a vector # ------------------------------------------------------------------------------- def test_100(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) with x.array_read() as b: for i in range(n): assert b[i] == 10 + i # ------------------------------------------------------------------------------- # Test setValue # ------------------------------------------------------------------------------- def test_101(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) value = 1 a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) with x.array() as b: for i in range(len(b)): assert b[i] == 10 + i x.set_value(3.0) check_values(ceed, x, 3.0) del x x = ceed.Vector(n) # Set value before setting or getting the array x.set_value(5.0) check_values(ceed, x, 5.0) # ------------------------------------------------------------------------------- # Test getArrayRead state counter # ------------------------------------------------------------------------------- def test_102(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) x.set_value(0) # Two read accesses should not generate an error a = x.get_array_read() b = x.get_array_read() x.restore_array_read() x.restore_array_read() # ------------------------------------------------------------------------------- # Test setting one vector from array of another vector # ------------------------------------------------------------------------------- def test_103(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) y = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) with x.array() as x_array: y.set_array(x_array, cmode=libceed.USE_POINTER) with y.array_read() as y_array: for i in range(n): assert y_array[i] == 10 + i # 
------------------------------------------------------------------------------- # Test getArray to modify array # ------------------------------------------------------------------------------- def test_104(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) a = np.zeros(n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) with x.array() as b: b[3] = -3.14 if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32: assert a[3] == np.float32(-3.14) else: assert a[3] == -3.14 # ------------------------------------------------------------------------------- # Test creation, setting, reading, restoring, and destroying of a vector using # CEED_MEM_DEVICE # ------------------------------------------------------------------------------- def test_105(ceed_resource): # Skip test for non-GPU backend if 'gpu' in ceed_resource: ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) y = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) arr = x.get_array_read(memtype=libceed.MEM_DEVICE) y.set_array(arr, memtype=libceed.MEM_DEVICE) x.restore_array_read() with y.array_read() as b: for i in range(n): assert b[i] == 10 + i # ------------------------------------------------------------------------------- # Test view # ------------------------------------------------------------------------------- def test_107(ceed_resource, capsys): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) print(x) stdout, stderr, ref_stdout = check.output(capsys) assert not stderr assert stdout == ref_stdout # ------------------------------------------------------------------------------- # Test norms # ------------------------------------------------------------------------------- def test_108(ceed_resource, capsys): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) a = np.arange(0, n, dtype=ceed.scalar_type()) for i in range(n): if (i % 2 == 0): a[i] *= -1 x.set_array(a, cmode=libceed.USE_POINTER) norm = x.norm(normtype=libceed.NORM_1) assert abs(norm - 45.) < TOL norm = x.norm() assert abs(norm - np.sqrt(285.)) < TOL norm = x.norm(normtype=libceed.NORM_MAX) assert abs(norm - 9.) < TOL # ------------------------------------------------------------------------------- # Test taking the reciprocal of a vector # ------------------------------------------------------------------------------- def test_119(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.USE_POINTER) x.reciprocal() with x.array_read() as b: for i in range(n): assert abs(b[i] - 1. 
/ (10 + i)) < TOL # ------------------------------------------------------------------------------- # Test AXPY # ------------------------------------------------------------------------------- def test_121(ceed_resource, capsys): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) y = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.COPY_VALUES) y.set_array(a, cmode=libceed.COPY_VALUES) y.axpy(-0.5, x) with y.array() as b: assert np.allclose(.5 * a, b) # ------------------------------------------------------------------------------- # Test pointwise multiplication # ------------------------------------------------------------------------------- def test_122(ceed_resource, capsys): ceed = libceed.Ceed(ceed_resource) n = 10 w = ceed.Vector(n) x = ceed.Vector(n) y = ceed.Vector(n) a = np.arange(0, n, dtype=ceed.scalar_type()) w.set_array(a, cmode=libceed.COPY_VALUES) x.set_array(a, cmode=libceed.COPY_VALUES) y.set_array(a, cmode=libceed.COPY_VALUES) w.pointwise_mult(x, y) with w.array() as b: for i in range(len(b)): assert abs(b[i] - i * i) < 1e-14 w.pointwise_mult(w, y) with w.array() as b: for i in range(len(b)): assert abs(b[i] - i * i * i) < 1e-14 w.pointwise_mult(x, w) with w.array() as b: for i in range(len(b)): assert abs(b[i] - i * i * i * i) < 1e-14 y.pointwise_mult(y, y) with y.array() as b: for i in range(len(b)): assert abs(b[i] - i * i) < 1e-14 # ------------------------------------------------------------------------------- # Test Scale # ------------------------------------------------------------------------------- def test_123(ceed_resource, capsys): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) a = np.arange(10, 10 + n, dtype=ceed.scalar_type()) x.set_array(a, cmode=libceed.COPY_VALUES) x.scale(-0.5) with x.array() as b: assert np.allclose(-.5 * a, b) # ------------------------------------------------------------------------------- # Test getArrayWrite to modify array # ------------------------------------------------------------------------------- def test_124(ceed_resource): ceed = libceed.Ceed(ceed_resource) n = 10 x = ceed.Vector(n) with x.array_write() as a: for i in range(len(a)): a[i] = 3 * i with x.array_read() as a: for i in range(len(a)): assert a[i] == 3 * i # ------------------------------------------------------------------------------- # Test modification of reshaped array # ------------------------------------------------------------------------------- def test_199(ceed_resource): """Modification of reshaped array""" ceed = libceed.Ceed(ceed_resource) vec = ceed.Vector(12) vec.set_value(0.0) with vec.array(4, 3) as x: x[...] = np.eye(4, 3) with vec.array_read(3, 4) as x: assert np.all(x == np.eye(4, 3).reshape(3, 4)) # -------------------------------------------------------------------------------
28.597734
81
0.470134
0
0
0
0
0
0
0
0
4,162
0.412283
60fe83ebf52b160eaac4df2f49fea7ababebb7f8
1,659
py
Python
esmvalcore/cmor/_fixes/cmip6/cesm2.py
aperezpredictia/ESMValCore
d5bf3f459ff3a43e780d75d57b63b88b6cc8c4f2
[ "Apache-2.0" ]
1
2019-11-28T13:09:42.000Z
2019-11-28T13:09:42.000Z
esmvalcore/cmor/_fixes/cmip6/cesm2.py
aperezpredictia/ESMValCore
d5bf3f459ff3a43e780d75d57b63b88b6cc8c4f2
[ "Apache-2.0" ]
null
null
null
esmvalcore/cmor/_fixes/cmip6/cesm2.py
aperezpredictia/ESMValCore
d5bf3f459ff3a43e780d75d57b63b88b6cc8c4f2
[ "Apache-2.0" ]
1
2019-11-29T00:50:30.000Z
2019-11-29T00:50:30.000Z
"""Fixes for CESM2 model.""" from ..fix import Fix from ..shared import (add_scalar_depth_coord, add_scalar_height_coord, add_scalar_typeland_coord, add_scalar_typesea_coord) class Fgco2(Fix): """Fixes for fgco2.""" def fix_metadata(self, cubes): """Add depth (0m) coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube """ cube = self.get_cube_from_list(cubes) add_scalar_depth_coord(cube) return cubes class Tas(Fix): """Fixes for tas.""" def fix_metadata(self, cubes): """Add height (2m) coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube """ cube = self.get_cube_from_list(cubes) add_scalar_height_coord(cube) return cubes class Sftlf(Fix): """Fixes for sftlf.""" def fix_metadata(self, cubes): """Add typeland coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube """ cube = self.get_cube_from_list(cubes) add_scalar_typeland_coord(cube) return cubes class Sftof(Fix): """Fixes for sftof.""" def fix_metadata(self, cubes): """Add typesea coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube """ cube = self.get_cube_from_list(cubes) add_scalar_typesea_coord(cube) return cubes
20.481481
74
0.540084
1,450
0.87402
0
0
0
0
0
0
794
0.478602
60fea0a23e4a5ea1e75b9b0eb479df0b1f05f8bb
707
py
Python
examples/GenerateSubset.py
vitay/YouTubeFacesDB
e7225e8d775ad64889fbee57a4452a25573a0360
[ "MIT" ]
11
2018-02-25T16:20:16.000Z
2021-07-27T02:46:09.000Z
examples/GenerateSubset.py
vitay/YouTubeFacesDB
e7225e8d775ad64889fbee57a4452a25573a0360
[ "MIT" ]
1
2018-12-14T19:56:16.000Z
2018-12-16T22:09:30.000Z
examples/GenerateSubset.py
vitay/YouTubeFacesDB
e7225e8d775ad64889fbee57a4452a25573a0360
[ "MIT" ]
3
2017-05-05T03:23:17.000Z
2019-11-11T01:39:25.000Z
from YouTubeFacesDB import generate_ytf_database ############################################################################### # Create the dataset ############################################################################### generate_ytf_database( directory= '../data',#'/scratch/vitay/Datasets/YouTubeFaces', # Location of the YTF dataset filename='ytfdb.h5', # Name of the HDF5 file to write to labels=10, # Number of labels to randomly select max_number=-1, # Maximum number of images to use size=(100, 100), # Size of the images color=False, # Black and white bw_first=True, # Final shape is (1, w, h) cropped=True # The original images are cropped to the faces )
47.133333
95
0.550212
0
0
0
0
0
0
0
0
481
0.680339
60ff05c6f96bea8d8b81dd6255359543dc3d93ad
1,562
py
Python
src/waldur_mastermind/billing/tests/test_price_current.py
opennode/nodeconductor-assembly-waldur
cad9966389dc9b52b13d2301940c99cf4b243900
[ "MIT" ]
2
2017-01-20T15:26:25.000Z
2017-08-03T04:38:08.000Z
src/waldur_mastermind/billing/tests/test_price_current.py
opennode/nodeconductor-assembly-waldur
cad9966389dc9b52b13d2301940c99cf4b243900
[ "MIT" ]
null
null
null
src/waldur_mastermind/billing/tests/test_price_current.py
opennode/nodeconductor-assembly-waldur
cad9966389dc9b52b13d2301940c99cf4b243900
[ "MIT" ]
null
null
null
from freezegun import freeze_time from rest_framework import test from waldur_mastermind.billing.tests.utils import get_financial_report_url from waldur_mastermind.invoices import models as invoice_models from waldur_mastermind.invoices.tests import factories as invoice_factories from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures @freeze_time('2017-01-10') class PriceCurrentTest(test.APITransactionTestCase): def setUp(self): self.fixture = invoice_fixtures.InvoiceFixture() invoice_factories.InvoiceItemFactory( invoice=self.fixture.invoice, project=self.fixture.project, unit=invoice_models.InvoiceItem.Units.PER_MONTH, unit_price=100, quantity=1, ) invoice_factories.InvoiceItemFactory( invoice=self.fixture.invoice, project=self.fixture.project, unit=invoice_models.InvoiceItem.Units.PER_DAY, unit_price=3, quantity=31, ) def test_current_price(self): self.client.force_authenticate(self.fixture.staff) url = get_financial_report_url(self.fixture.project.customer) response = self.client.get(url) self.assertEqual(response.status_code, 200) data = response.json() self.assertEqual(data['billing_price_estimate']['current'], 100 + 9 * 3) diff = ( data['billing_price_estimate']['total'] - data['billing_price_estimate']['current'] ) self.assertEqual(diff, 22 * 3)
37.190476
80
0.68822
1,176
0.752881
0
0
1,203
0.770166
0
0
109
0.069782
60ff33dc263000945ad4491d74e8e10a35657808
16,741
py
Python
tests/test_cli/test_utils/test_utils.py
ejfitzgerald/agents-aea
6411fcba8af2cdf55a3005939ae8129df92e8c3e
[ "Apache-2.0" ]
null
null
null
tests/test_cli/test_utils/test_utils.py
ejfitzgerald/agents-aea
6411fcba8af2cdf55a3005939ae8129df92e8c3e
[ "Apache-2.0" ]
null
null
null
tests/test_cli/test_utils/test_utils.py
ejfitzgerald/agents-aea
6411fcba8af2cdf55a3005939ae8129df92e8c3e
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This test module contains the tests for aea.cli.utils module.""" from builtins import FileNotFoundError from typing import cast from unittest import TestCase, mock from click import BadParameter, ClickException from jsonschema import ValidationError from yaml import YAMLError from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter from aea.cli.utils.config import ( _init_cli_config, get_or_create_cli_config, update_cli_config, ) from aea.cli.utils.context import Context from aea.cli.utils.decorators import _validate_config_consistency, clean_after from aea.cli.utils.formatting import format_items from aea.cli.utils.generic import is_readme_present from aea.cli.utils.package_utils import ( find_item_in_distribution, find_item_locally, is_fingerprint_correct, try_get_balance, try_get_item_source_path, try_get_item_target_path, validate_author_name, validate_package_name, ) from tests.conftest import FETCHAI from tests.test_cli.tools_for_testing import ( ConfigLoaderMock, ContextMock, PublicIdMock, StopTest, raise_stoptest, ) AUTHOR = "author" class FormatItemsTestCase(TestCase): """Test case for format_items method.""" def testformat_items_positive(self): """Test format_items positive result.""" items = [ { "public_id": "author/name:version", "name": "obj-name", "description": "Some description", "author": "author", "version": "1.0", } ] result = format_items(items) expected_result = ( "------------------------------\n" "Public ID: author/name:version\n" "Name: obj-name\n" "Description: Some description\n" "Author: author\n" "Version: 1.0\n" "------------------------------\n" ) self.assertEqual(result, expected_result) @mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path") class TryGetItemSourcePathTestCase(TestCase): """Test case for try_get_item_source_path method.""" @mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True) def test_get_item_source_path_positive(self, exists_mock, join_mock): """Test for get_item_source_path positive result.""" result = try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name") expected_result = "some-path" self.assertEqual(result, expected_result) join_mock.assert_called_once_with("cwd", AUTHOR, "skills", "skill-name") exists_mock.assert_called_once_with("some-path") result = try_get_item_source_path("cwd", None, "skills", "skill-name") self.assertEqual(result, expected_result) @mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False) def test_get_item_source_path_not_exists(self, exists_mock, join_mock): """Test for get_item_source_path item already exists.""" with self.assertRaises(ClickException): try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name") 
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path") class TryGetItemTargetPathTestCase(TestCase): """Test case for try_get_item_target_path method.""" @mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False) def test_get_item_target_path_positive(self, exists_mock, join_mock): """Test for get_item_source_path positive result.""" result = try_get_item_target_path("packages", AUTHOR, "skills", "skill-name") expected_result = "some-path" self.assertEqual(result, expected_result) join_mock.assert_called_once_with("packages", AUTHOR, "skills", "skill-name") exists_mock.assert_called_once_with("some-path") @mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True) def test_get_item_target_path_already_exists(self, exists_mock, join_mock): """Test for get_item_target_path item already exists.""" with self.assertRaises(ClickException): try_get_item_target_path("skills", AUTHOR, "skill-name", "packages_path") class PublicIdParameterTestCase(TestCase): """Test case for PublicIdParameter class.""" def test_get_metavar_positive(self): """Test for get_metavar positive result.""" result = PublicIdParameter.get_metavar("obj", "param") expected_result = "PUBLIC_ID" self.assertEqual(result, expected_result) @mock.patch("aea.cli.utils.config.os.path.dirname", return_value="dir-name") @mock.patch("aea.cli.utils.config.os.path.exists", return_value=False) @mock.patch("aea.cli.utils.config.os.makedirs") @mock.patch("builtins.open") class InitConfigFolderTestCase(TestCase): """Test case for _init_cli_config method.""" def test_init_cli_config_positive( self, open_mock, makedirs_mock, exists_mock, dirname_mock ): """Test for _init_cli_config method positive result.""" _init_cli_config() dirname_mock.assert_called_once() exists_mock.assert_called_once_with("dir-name") makedirs_mock.assert_called_once_with("dir-name") @mock.patch("aea.cli.utils.config.get_or_create_cli_config") @mock.patch("aea.cli.utils.generic.yaml.dump") @mock.patch("builtins.open", mock.mock_open()) class UpdateCLIConfigTestCase(TestCase): """Test case for update_cli_config method.""" def testupdate_cli_config_positive(self, dump_mock, icf_mock): """Test for update_cli_config method positive result.""" update_cli_config({"some": "config"}) icf_mock.assert_called_once() dump_mock.assert_called_once() def _raise_yamlerror(*args): raise YAMLError() def _raise_file_not_found_error(*args): raise FileNotFoundError() @mock.patch("builtins.open", mock.mock_open()) class GetOrCreateCLIConfigTestCase(TestCase): """Test case for read_cli_config method.""" @mock.patch( "aea.cli.utils.generic.yaml.safe_load", return_value={"correct": "output"} ) def testget_or_create_cli_config_positive(self, safe_load_mock): """Test for get_or_create_cli_config method positive result.""" result = get_or_create_cli_config() expected_result = {"correct": "output"} self.assertEqual(result, expected_result) safe_load_mock.assert_called_once() @mock.patch("aea.cli.utils.generic.yaml.safe_load", _raise_yamlerror) def testget_or_create_cli_config_bad_yaml(self): """Test for rget_or_create_cli_config method bad yaml behavior.""" with self.assertRaises(ClickException): get_or_create_cli_config() class CleanAfterTestCase(TestCase): """Test case for clean_after decorator method.""" @mock.patch("aea.cli.utils.decorators.os.path.exists", return_value=True) @mock.patch("aea.cli.utils.decorators._cast_ctx", lambda x: x) @mock.patch("aea.cli.utils.decorators.shutil.rmtree") def test_clean_after_positive(self, 
rmtree_mock, *mocks): """Test clean_after decorator method for positive result.""" @clean_after def func(click_context): ctx = cast(Context, click_context.obj) ctx.clean_paths.append("clean/path") raise ClickException("Message") with self.assertRaises(ClickException): func(ContextMock()) rmtree_mock.assert_called_once_with("clean/path") @mock.patch("aea.cli.utils.package_utils.click.echo", raise_stoptest) class ValidateAuthorNameTestCase(TestCase): """Test case for validate_author_name method.""" @mock.patch( "aea.cli.utils.package_utils.click.prompt", return_value="correct_author" ) def test_validate_author_name_positive(self, prompt_mock): """Test validate_author_name for positive result.""" author = "valid_author" result = validate_author_name(author=author) self.assertEqual(result, author) result = validate_author_name() self.assertEqual(result, "correct_author") prompt_mock.assert_called_once() @mock.patch( "aea.cli.utils.package_utils.click.prompt", return_value="inv@l1d_@uth&r" ) def test_validate_author_name_negative(self, prompt_mock): """Test validate_author_name for negative result.""" with self.assertRaises(StopTest): validate_author_name() prompt_mock.return_value = "skills" with self.assertRaises(StopTest): validate_author_name() class ValidatePackageNameTestCase(TestCase): """Test case for validate_package_name method.""" def test_validate_package_name_positive(self): """Test validate_package_name for positive result.""" validate_package_name("correct_name") def test_validate_package_name_negative(self): """Test validate_package_name for negative result.""" with self.assertRaises(BadParameter): validate_package_name("incorrect-name") def _raise_validation_error(*args, **kwargs): raise ValidationError("Message.") class FindItemLocallyTestCase(TestCase): """Test case for find_item_locally method.""" @mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True) @mock.patch( "aea.cli.utils.package_utils.ConfigLoader.from_configuration_type", _raise_validation_error, ) def test_find_item_locally_bad_config(self, *mocks): """Test find_item_locally for bad config result.""" public_id = PublicIdMock.from_str("fetchai/echo:0.5.0") with self.assertRaises(ClickException) as cm: find_item_locally(ContextMock(), "skill", public_id) self.assertIn("configuration file not valid", cm.exception.message) @mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True) @mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open()) @mock.patch( "aea.cli.utils.package_utils.ConfigLoader.from_configuration_type", return_value=ConfigLoaderMock(), ) def test_find_item_locally_cant_find(self, from_conftype_mock, *mocks): """Test find_item_locally for can't find result.""" public_id = PublicIdMock.from_str("fetchai/echo:0.5.0") with self.assertRaises(ClickException) as cm: find_item_locally(ContextMock(), "skill", public_id) self.assertEqual( cm.exception.message, "Cannot find skill with author and version specified." 
) class FindItemInDistributionTestCase(TestCase): """Test case for find_item_in_distribution method.""" @mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True) @mock.patch( "aea.cli.utils.package_utils.ConfigLoader.from_configuration_type", _raise_validation_error, ) def testfind_item_in_distribution_bad_config(self, *mocks): """Test find_item_in_distribution for bad config result.""" public_id = PublicIdMock.from_str("fetchai/echo:0.5.0") with self.assertRaises(ClickException) as cm: find_item_in_distribution(ContextMock(), "skill", public_id) self.assertIn("configuration file not valid", cm.exception.message) @mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=False) def testfind_item_in_distribution_not_found(self, *mocks): """Test find_item_in_distribution for not found result.""" public_id = PublicIdMock.from_str("fetchai/echo:0.5.0") with self.assertRaises(ClickException) as cm: find_item_in_distribution(ContextMock(), "skill", public_id) self.assertIn("Cannot find skill", cm.exception.message) @mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True) @mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open()) @mock.patch( "aea.cli.utils.package_utils.ConfigLoader.from_configuration_type", return_value=ConfigLoaderMock(), ) def testfind_item_in_distribution_cant_find(self, from_conftype_mock, *mocks): """Test find_item_locally for can't find result.""" public_id = PublicIdMock.from_str("fetchai/echo:0.5.0") with self.assertRaises(ClickException) as cm: find_item_in_distribution(ContextMock(), "skill", public_id) self.assertEqual( cm.exception.message, "Cannot find skill with author and version specified." ) class ValidateConfigConsistencyTestCase(TestCase): """Test case for _validate_config_consistency method.""" @mock.patch("aea.cli.utils.config.Path.exists", _raise_validation_error) def test__validate_config_consistency_cant_find(self, *mocks): """Test _validate_config_consistency can't find result""" with self.assertRaises(ValueError) as cm: _validate_config_consistency(ContextMock(protocols=["some"])) self.assertIn("Cannot find", str(cm.exception)) @mock.patch( "aea.cli.utils.package_utils._compute_fingerprint", return_value={"correct": "fingerprint"}, ) class IsFingerprintCorrectTestCase(TestCase): """Test case for adding skill with invalid fingerprint.""" def test_is_fingerprint_correct_positive(self, *mocks): """Test is_fingerprint_correct method for positive result.""" item_config = mock.Mock() item_config.fingerprint = {"correct": "fingerprint"} item_config.fingerprint_ignore_patterns = [] result = is_fingerprint_correct("package_path", item_config) self.assertTrue(result) def test_is_fingerprint_correct_negative(self, *mocks): """Test is_fingerprint_correct method for negative result.""" item_config = mock.Mock() item_config.fingerprint = {"incorrect": "fingerprint"} item_config.fingerprint_ignore_patterns = [] package_path = "package_dir" result = is_fingerprint_correct(package_path, item_config) self.assertFalse(result) @mock.patch("aea.cli.config.click.ParamType") class AEAJsonPathTypeTestCase(TestCase): """Test case for AEAJsonPathType class.""" @mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=True) def test_convert_root_vendor_positive(self, *mocks): """Test for convert method with root "vendor" positive result.""" value = "vendor.author.protocols.package_name.attribute_name" ctx_mock = ContextMock() ctx_mock.obj = mock.Mock() ctx_mock.obj.set_config = mock.Mock() obj = AEAJsonPathType() 
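        # Path.exists is patched to return True, so convert() should accept the vendor path without raising.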
obj.convert(value, "param", ctx_mock) @mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=False) def test_convert_root_vendor_path_not_exists(self, *mocks): """Test for convert method with root "vendor" path not exists.""" value = "vendor.author.protocols.package_name.attribute_name" obj = AEAJsonPathType() with self.assertRaises(BadParameter): obj.convert(value, "param", "ctx") @mock.patch("aea.cli.utils.package_utils.LedgerApis", mock.MagicMock()) class TryGetBalanceTestCase(TestCase): """Test case for try_get_balance method.""" def test_try_get_balance_positive(self): """Test for try_get_balance method positive result.""" agent_config = mock.Mock() agent_config.default_ledger_config = FETCHAI wallet_mock = mock.Mock() wallet_mock.addresses = {FETCHAI: "some-adress"} try_get_balance(agent_config, wallet_mock, FETCHAI) @mock.patch("aea.cli.utils.generic.os.path.exists", return_value=True) class IsReadmePresentTestCase(TestCase): """Test case for is_readme_present method.""" def test_is_readme_present_positive(self, *mocks): """Test is_readme_present for positive result.""" self.assertTrue(is_readme_present("readme/path"))
39.206089
88
0.696792
13,569
0.810525
0
0
12,452
0.743803
0
0
6,169
0.368497
60ff5c6f7092666241901b36f6825248e6f4d160
360
py
Python
api/flat/urls.py
SanjarbekSaminjonov/musofirlar.backend
23b09e90cc4e3d153063ad1768b5ae1c18ff866d
[ "Apache-2.0" ]
1
2021-12-23T12:43:17.000Z
2021-12-23T12:43:17.000Z
api/flat/urls.py
SanjarbekSaminjonov/musofirlar.backend
23b09e90cc4e3d153063ad1768b5ae1c18ff866d
[ "Apache-2.0" ]
null
null
null
api/flat/urls.py
SanjarbekSaminjonov/musofirlar.backend
23b09e90cc4e3d153063ad1768b5ae1c18ff866d
[ "Apache-2.0" ]
null
null
null
from django.urls import path from . import views urlpatterns = [ path('', views.FlatListAPIView.as_view()), path('create/', views.FlatCreateAPIView.as_view()), path('<int:pk>/', views.FlatDetailAPIView.as_view()), path('<int:pk>/update/', views.FlatUpdateAPIView.as_view()), path('<int:pk>/delete/', views.FlatDeleteAPIView.as_view()), ]
30
64
0.683333
0
0
0
0
0
0
0
0
58
0.161111
8801787fa421093191e86dccf0ba799d1e648912
506
py
Python
hyssop_aiohttp/component/__init__.py
hsky77/hyssop
4ab1e82f9e2592de56589c7426a037564bef49a6
[ "MIT" ]
null
null
null
hyssop_aiohttp/component/__init__.py
hsky77/hyssop
4ab1e82f9e2592de56589c7426a037564bef49a6
[ "MIT" ]
null
null
null
hyssop_aiohttp/component/__init__.py
hsky77/hyssop
4ab1e82f9e2592de56589c7426a037564bef49a6
[ "MIT" ]
null
null
null
# Copyright (C) 2020-Present the hyssop authors and contributors. # # This module is part of hyssop and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php ''' File created: January 1st 2021 Modified By: hsky77 Last Updated: January 7th 2021 15:30:08 pm ''' from hyssop.project.component import ComponentTypes from .aio_client import AioClientComponent class AioHttpComponentTypes(ComponentTypes): AioClient = ('aioclient', 'aio_client', 'AioClientComponent')
25.3
69
0.774704
110
0.217391
0
0
0
0
0
0
333
0.658103
88019b110382885f8543e3444fa6b00a5c38b567
3,691
py
Python
run_clone.py
tGhattas/IMP-seamless-cloning
2c81e0bd9bc99955afe06ec4eea187a5a42761e3
[ "MIT" ]
null
null
null
run_clone.py
tGhattas/IMP-seamless-cloning
2c81e0bd9bc99955afe06ec4eea187a5a42761e3
[ "MIT" ]
null
null
null
run_clone.py
tGhattas/IMP-seamless-cloning
2c81e0bd9bc99955afe06ec4eea187a5a42761e3
[ "MIT" ]
null
null
null
import cv2 import getopt import sys from gui import MaskPainter, MaskMover from clone import seamless_cloning, shepards_seamless_cloning from utils import read_image, plt from os import path def usage(): print( "Usage: python run_clone.py [options] \n\n\ Options: \n\ \t-h\t Flag to specify a brief help message and exits..\n\ \t-s\t(Required) Specify a source image.\n\ \t-t\t(Required) Specify a target image.\n\ \t-m\t(Optional) Specify a mask image with the object in white and other part in black, ignore this option if you plan to draw it later.\n\ \t-x\t(Optional) Flag to specify a mode, either 'possion' or 'shepard'. default is possion.\n\ \t-v\t(Optional) Flag to specify grad field of source only or both in case of Possion solver is used. default is source only.") if __name__ == '__main__': # parse command line arguments args = {} try: opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:") except getopt.GetoptError as err: # print help information and exit: print(err) # will print something like "option -a not recognized" print("See help: run_clone.py -h") exit(2) for o, a in opts: if o in ("-h"): usage() exit() elif o in ("-s"): args["source"] = a elif o in ("-t"): args["target"] = a elif o in ("-m"): args["mask"] = a elif o in ("-x"): args["mode"] = a.lower() elif o in ("-v"): args["gradient_field_source_only"] = a else: continue # if ("source" not in args) or ("target" not in args): usage() exit() # # set default mode to Possion solver mode = "poisson" if ("mode" not in args) else args["mode"] gradient_field_source_only = ("gradient_field_source_only" not in args) source = read_image(args["source"], 2) target = read_image(args["target"], 2) if source is None or target is None: print('Source or target image not exist.') exit() if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]: print('Source image cannot be larger than target image.') exit() # draw the mask mask_path = "" if "mask" not in args: print('Please highlight the object to disapparate.\n') mp = MaskPainter(args["source"]) mask_path = mp.paint_mask() else: mask_path = args["mask"] # adjust mask position for target image print('Please move the object to desired location to apparate.\n') mm = MaskMover(args["target"], mask_path) offset_x, offset_y, target_mask_path = mm.move_mask() # blend print('Blending ...') target_mask = read_image(target_mask_path, 1) offset = offset_x, offset_y cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {} blend_result = cloning_tool(source, target, target_mask, offset, **kwargs) cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'), blend_result) plt.figure("Result"), plt.imshow(blend_result), plt.show() print('Done.\n') ''' running example: - Possion based solver: python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg python run_clone.py -s external/source3.jpg -t external/target3.jpg -v - Shepard's interpolation: python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x python run_clone.py -s external/source3.jpg -t external/target3.jpg -x '''
33.554545
147
0.629098
0
0
0
0
0
0
0
0
1,731
0.468979
8801ff2af63497d7ca9dadd57139f98ae23b3370
5,081
py
Python
punkweb_boards/rest/serializers.py
Punkweb/punkweb-boards
8934d15fbff2a3ce9191fdb19d58d029eb55ef16
[ "BSD-3-Clause" ]
20
2018-02-22T11:36:04.000Z
2022-03-22T11:48:22.000Z
punkweb_boards/rest/serializers.py
imgVOID/punkweb-boards
8934d15fbff2a3ce9191fdb19d58d029eb55ef16
[ "BSD-3-Clause" ]
28
2018-02-22T07:11:46.000Z
2022-02-23T08:05:29.000Z
punkweb_boards/rest/serializers.py
imgVOID/punkweb-boards
8934d15fbff2a3ce9191fdb19d58d029eb55ef16
[ "BSD-3-Clause" ]
5
2018-02-25T11:05:19.000Z
2021-05-27T02:25:31.000Z
from rest_framework import serializers from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS from punkweb_boards.models import ( BoardProfile, Category, Subcategory, Thread, Post, Conversation, Message, Report, Shout, ) class BoardProfileSerializer(serializers.ModelSerializer): post_count = serializers.ReadOnlyField() can_shout = serializers.ReadOnlyField() rendered_username = serializers.ReadOnlyField() rendered_rank = serializers.ReadOnlyField() class Meta: model = BoardProfile fields = "__all__" class CategorySerializer(serializers.ModelSerializer): class Meta: model = Category exclude = ("auth_req",) class SubcategorySerializer(serializers.ModelSerializer): last_thread = serializers.ReadOnlyField(source="last_thread.id") last_thread_title = serializers.ReadOnlyField(source="last_thread.title") last_thread_created = serializers.ReadOnlyField( source="last_thread.created" ) last_thread_user = serializers.ReadOnlyField( source="last_thread.user.profile.rendered_username" ) parent_name = serializers.ReadOnlyField(source="parent.name") thread_count = serializers.ReadOnlyField() post_count = serializers.ReadOnlyField() can_post = serializers.SerializerMethodField() def get_can_post(self, obj): return obj.can_post(self.context.get("request").user) class Meta: model = Subcategory exclude = ("auth_req",) class ThreadSerializer(serializers.ModelSerializer): last_post = serializers.ReadOnlyField(source="last_post.id") last_post_created = serializers.ReadOnlyField(source="last_post.created") last_post_username = serializers.ReadOnlyField( source="last_post.user.username" ) last_post_rendered_username = serializers.ReadOnlyField( source="last_post.user.profile.rendered_username" ) user_username = serializers.ReadOnlyField(source="user.username") user_rendered_username = serializers.ReadOnlyField( source="user.profile.rendered_username" ) user_image = serializers.ReadOnlyField(source="user.profile.avatar") user_post_count = serializers.ReadOnlyField( source="user.profile.post_count" ) user_join_date = serializers.ReadOnlyField(source="user.created") flagged = serializers.ReadOnlyField(source="reported") posts_count = serializers.ReadOnlyField() can_edit = serializers.SerializerMethodField() def get_can_edit(self, obj): return obj.can_edit(self.context.get("request").user) class Meta: model = Thread fields = "__all__" read_only_fields = ( "pinned", "closed", "user", "upvoted_by", "downvoted_by", ) class PostSerializer(serializers.ModelSerializer): flagged = serializers.ReadOnlyField(source="reported") can_edit = serializers.SerializerMethodField() def get_can_edit(self, obj): return obj.can_edit(self.context.get("request").user) class Meta: model = Post fields = "__all__" read_only_fields = ("user", "upvoted_by", "downvoted_by") class ConversationSerializer(serializers.ModelSerializer): last_message = serializers.ReadOnlyField(source="last_message.id") last_message_title = serializers.ReadOnlyField(source="last_message.title") last_message_created = serializers.ReadOnlyField( source="last_message.created" ) last_message_user = serializers.ReadOnlyField( source="last_message.user.profile.rendered_username" ) message_count = serializers.ReadOnlyField() class Meta: model = Conversation fields = "__all__" read_only_fields = ("unread_by",) class MessageSerializer(serializers.ModelSerializer): class Meta: model = Message fields = "__all__" read_only_fields = ("user",) class ShoutSerializer(serializers.ModelSerializer): username = serializers.ReadOnlyField(source="user.username") rendered_username 
= serializers.ReadOnlyField( source="user.profile.rendered_username" ) class Meta: model = Shout fields = ( "id", "user", "username", "rendered_username", "content", "_content_rendered", "created", "modified", ) read_only_fields = ("user",) def create(self, validated_data): for key in SHOUTBOX_DISABLED_TAGS: key_tag = "[{}]".format(key).lower() if ( key_tag[: len(key_tag) - 1] in validated_data.get("content").lower() ): raise serializers.ValidationError( { "notAllowed": "{} is not allowed in the shoutbox".format( key_tag ) } ) return Shout.objects.create(**validated_data)
30.793939
81
0.657745
4,791
0.942925
0
0
0
0
0
0
834
0.164141
88031b336437f0a5497f94eace7653d85a0ddb61
1,326
py
Python
runtime/components/Statistic/moving_minimum_time.py
ulise/hetida-designer
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
[ "MIT" ]
41
2020-11-18T10:12:29.000Z
2022-03-28T21:46:41.000Z
runtime/components/Statistic/moving_minimum_time.py
ulise/hetida-designer
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
[ "MIT" ]
4
2020-12-08T15:28:15.000Z
2022-02-01T11:40:17.000Z
runtime/components/Statistic/moving_minimum_time.py
ulise/hetida-designer
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
[ "MIT" ]
14
2020-11-18T11:39:17.000Z
2022-03-21T15:05:11.000Z
from hetdesrun.component.registration import register from hetdesrun.datatypes import DataType import pandas as pd import numpy as np # ***** DO NOT EDIT LINES BELOW ***** # These lines may be overwritten if input/output changes. @register( inputs={"data": DataType.Any, "t": DataType.String}, outputs={"movmin": DataType.Any}, ) def main(*, data, t): """entrypoint function for this component Usage example: >>> main( ... data = pd.Series( ... { ... "2019-08-01T15:20:00": 4.0, ... "2019-08-01T15:20:01": 5.0, ... "2019-08-01T15:20:05": 1.0, ... "2019-08-01T15:20:09": 9.0, ... } ... ), ... t = "4s" ... )["movmin"] 2019-08-01 15:20:00 4.0 2019-08-01 15:20:01 4.0 2019-08-01 15:20:05 1.0 2019-08-01 15:20:09 9.0 dtype: float64 """ # ***** DO NOT EDIT LINES ABOVE ***** # write your code here. try: data.index = pd.to_datetime(data.index) except (ValueError, TypeError): raise TypeError("indices of data must be datetime") data_sort = data.sort_index().dropna() try: return {"movmin": data_sort.rolling(t).min()} except (ValueError): raise ValueError(f"t could not be parsed as frequency: {t}")
28.212766
68
0.555053
0
0
0
0
1,093
0.824284
0
0
789
0.595023
88044ce700e39ec36bb7ba44d3c9905b593ae3a4
4,460
py
Python
painter.py
MikhailNakhatovich/rooms_painting
51b92797c867d4bb1c8d42a58785c0f4dacd4075
[ "MIT" ]
null
null
null
painter.py
MikhailNakhatovich/rooms_painting
51b92797c867d4bb1c8d42a58785c0f4dacd4075
[ "MIT" ]
null
null
null
painter.py
MikhailNakhatovich/rooms_painting
51b92797c867d4bb1c8d42a58785c0f4dacd4075
[ "MIT" ]
null
null
null
import cv2 import ezdxf import numpy as np def draw_hatch(img, entity, color, mask): for poly_path in entity.paths.paths: # print(poly_path.path_type_flags) polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int) if poly_path.path_type_flags & 1 == 1: cv2.fillPoly(img, [polygon], color) cv2.fillPoly(mask, [polygon], (255, 255, 255)) else: cv2.fillPoly(img, [polygon], (255, 255, 255)) return color def draw_line(img, entity, color, mask): p1 = entity.dxf.start[:-1] p2 = entity.dxf.end[:-1] cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1) cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2) return color def draw_lwpolyline(img, entity, color, mask): polyline = [] a = np.array(entity.lwpoints.values).astype(int) while len(a) > 0: polyline.append((a[0], a[1])) a = a[5:] cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1) cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2) return color def draw_arc(img, entity, color, mask): s = entity.dxf.start_angle * np.pi / 180 e = entity.dxf.end_angle * np.pi / 180 if s > e: s -= 2 * np.pi d = (e - s) / (int((e - s) * 180 / np.pi) + 1) r = entity.dxf.radius cx, cy = entity.dxf.center.xyz[:-1] angles = np.arange(s, e + d / 2, d) x = cx + r * np.cos(angles) y = cy + r * np.sin(angles) points = np.column_stack((x, y)).astype(int) cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1) cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2) return color def draw_circle(img, entity, color, mask): r = entity.dxf.radius cx, cy = entity.dxf.center.xyz[:-1] cv2.circle(img, (int(cx), int(cy)), int(r), color, 1) cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1) return color def draw_ellipse(img, entity, color, mask): cx, cy = entity.dxf.center.xyz[:-1] ma = entity.dxf.major_axis.magnitude angle = entity.dxf.major_axis.angle_deg mi = ma * entity.dxf.ratio s = entity.dxf.start_param * 180 / np.pi e = entity.dxf.end_param * 180 / np.pi if entity.dxf.extrusion.z == -1: s = 360 - s e = 360 - e cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1) cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1) return color def draw_point(img, entity, color, mask): cx, cy = entity.dxf.location.xyz[:-1] cv2.circle(img, (int(cx), int(cy)), 0, color, 1) cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1) return color draw_map = { 'HATCH': draw_hatch, 'LINE': draw_line, 'LWPOLYLINE': draw_lwpolyline, 'ARC': draw_arc, 'CIRCLE': draw_circle, 'ELLIPSE': draw_ellipse, 'POINT': draw_point, } def paint(in_path, out_path, config): doc = ezdxf.readfile(in_path) extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN'] xmin, ymin = np.floor(extmin[:-1]).astype(int) xmax, ymax = np.ceil(extmax[:-1]).astype(int) img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255 mask = np.zeros_like(img) msp = doc.modelspace() layers = config.get('layers', {}) colors = config.get('colors', {}) # print(doc.layers.entries.keys()) for layer_name, names in layers.items(): color = tuple(colors.get(layer_name, [0, 0, 0])) for name in names: if name not in doc.layers: continue entities = msp.query('*[layer=="%s"]' % name) tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8) for entity in entities: if entity.DXFTYPE in draw_map: draw_map[entity.DXFTYPE](img, entity, color, tmp) else: print("%s: %s" % (name, entity.DXFTYPE)) contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, 
cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(mask, contours, -1, color, -1) res, img_png = cv2.imencode('.png', cv2.flip(img, 0)) res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0)) with open(out_path, 'wb') as f: f.write(img_png.tobytes()) with open(out_path[:-4] + "_mask.png", 'wb') as f: f.write(mask_png.tobytes())
35.11811
99
0.578924
0
0
0
0
0
0
0
0
211
0.047309
8804685e3bac745bbfacb5b5cab8b6e032a05238
3,064
py
Python
misago/misago/users/serializers/auth.py
vascoalramos/misago-deployment
20226072138403108046c0afad9d99eb4163cedc
[ "MIT" ]
2
2021-03-06T21:06:13.000Z
2021-03-09T15:05:12.000Z
misago/misago/users/serializers/auth.py
vascoalramos/misago-deployment
20226072138403108046c0afad9d99eb4163cedc
[ "MIT" ]
null
null
null
misago/misago/users/serializers/auth.py
vascoalramos/misago-deployment
20226072138403108046c0afad9d99eb4163cedc
[ "MIT" ]
null
null
null
from django.contrib.auth import get_user_model from django.urls import reverse from rest_framework import serializers from ...acl.useracl import serialize_user_acl from .user import UserSerializer User = get_user_model() __all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"] class AuthFlags: def get_is_authenticated(self, obj): return bool(obj.is_authenticated) def get_is_anonymous(self, obj): return bool(obj.is_anonymous) class AuthenticatedUserSerializer(UserSerializer, AuthFlags): email = serializers.SerializerMethodField() is_authenticated = serializers.SerializerMethodField() is_anonymous = serializers.SerializerMethodField() class Meta: model = User fields = UserSerializer.Meta.fields + [ "has_usable_password", "is_hiding_presence", "limits_private_thread_invites_to", "unread_private_threads", "subscribe_to_started_threads", "subscribe_to_replied_threads", "is_authenticated", "is_anonymous", ] def get_acl(self, obj): acl = self.context.get("acl") if acl: return serialize_user_acl(acl) return {} def get_email(self, obj): return obj.email def get_api(self, obj): return { "avatar": reverse("misago:api:user-avatar", kwargs={"pk": obj.pk}), "data_downloads": reverse( "misago:api:user-data-downloads", kwargs={"pk": obj.pk} ), "details": reverse("misago:api:user-details", kwargs={"pk": obj.pk}), "change_email": reverse( "misago:api:user-change-email", kwargs={"pk": obj.pk} ), "change_password": reverse( "misago:api:user-change-password", kwargs={"pk": obj.pk} ), "edit_details": reverse( "misago:api:user-edit-details", kwargs={"pk": obj.pk} ), "options": reverse("misago:api:user-forum-options", kwargs={"pk": obj.pk}), "request_data_download": reverse( "misago:api:user-request-data-download", kwargs={"pk": obj.pk} ), "username": reverse("misago:api:user-username", kwargs={"pk": obj.pk}), "delete": reverse( "misago:api:user-delete-own-account", kwargs={"pk": obj.pk} ), } AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields( "is_avatar_locked", "is_blocked", "is_followed", "is_signature_locked", "meta", "signature", "status", ) class AnonymousUserSerializer(serializers.Serializer, AuthFlags): id = serializers.ReadOnlyField() acl = serializers.SerializerMethodField() is_authenticated = serializers.SerializerMethodField() is_anonymous = serializers.SerializerMethodField() def get_acl(self, obj): acl = self.context.get("acl") if acl: return serialize_user_acl(acl) return {}
31.587629
87
0.616841
2,553
0.833225
0
0
0
0
0
0
818
0.266971
8804c3b09c4502328bb0532182f3bbfcec72facf
2,171
py
Python
shop/models.py
mohammadanarul/Ecommerce-Django-YT
afecc8f41693925619b81986d979706c64175360
[ "MIT" ]
null
null
null
shop/models.py
mohammadanarul/Ecommerce-Django-YT
afecc8f41693925619b81986d979706c64175360
[ "MIT" ]
null
null
null
shop/models.py
mohammadanarul/Ecommerce-Django-YT
afecc8f41693925619b81986d979706c64175360
[ "MIT" ]
null
null
null
from ctypes.wintypes import CHAR from distutils.command.upload import upload from random import choice from telnetlib import STATUS from unicodedata import category from django.db import models from ckeditor.fields import RichTextField from taggit.managers import TaggableManager # Create your models here. from mptt.models import MPTTModel, TreeForeignKey class Category(MPTTModel): name = models.CharField(max_length=50, unique=True) parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children') is_active = models.BooleanField(default=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class MPTTMeta: order_insertion_by = ['name'] class Brand(models.Model): name = models.CharField(max_length=50) is_active = models.BooleanField(default=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class Product(models.Model): STATUS_CHOICES = ( ('NONE', 'NONE'), ('NEW', 'NEW'), ('SALE', 'SALE'), ('HOT', 'HOT'), ) title = models.CharField(max_length=50) price = models.DecimalField(max_digits=5, decimal_places=2) short_description = RichTextField() tags = TaggableManager() description = RichTextField() specification = RichTextField() image = models.ImageField(upload_to='product/') category = models.ForeignKey(Category, on_delete=models.CASCADE) brand = models.ForeignKey(Brand, on_delete=models.CASCADE) stack = models.IntegerField(default=5) status = models.CharField(max_length=5, choices=STATUS_CHOICES, default='NONE') is_fetured = models.BooleanField(default=False) is_special = models.BooleanField(default=False) is_active = models.BooleanField(default=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class ProductImages(models.Model): category = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images') image = models.ImageField(upload_to='products/')
38.767857
109
0.740212
1,805
0.831414
0
0
0
0
0
0
127
0.058498
8805528dd519906fc019a797eb45969b31e9b633
7,470
py
Python
supriya/patterns/NoteEvent.py
deeuu/supriya
14fcb5316eccb4dafbe498932ceff56e1abb9d27
[ "MIT" ]
null
null
null
supriya/patterns/NoteEvent.py
deeuu/supriya
14fcb5316eccb4dafbe498932ceff56e1abb9d27
[ "MIT" ]
null
null
null
supriya/patterns/NoteEvent.py
deeuu/supriya
14fcb5316eccb4dafbe498932ceff56e1abb9d27
[ "MIT" ]
null
null
null
import uuid import supriya.commands import supriya.realtime from supriya.patterns.Event import Event class NoteEvent(Event): ### CLASS VARIABLES ### __slots__ = () ### INITIALIZER ### def __init__( self, add_action=None, delta=None, duration=None, is_stop=True, synthdef=None, target_node=None, uuid=None, **settings, ): if add_action is not None: add_action = supriya.AddAction.from_expr(add_action) Event.__init__( self, add_action=add_action, delta=delta, duration=duration, is_stop=bool(is_stop), synthdef=synthdef, target_node=target_node, uuid=uuid, **settings, ) ### PRIVATE METHODS ### def _perform_nonrealtime(self, session, uuids, offset, maximum_offset=None): import supriya.assets.synthdefs settings = self.settings.copy() # Do not mutate in place. synthdef = self.get("synthdef", supriya.assets.synthdefs.default) synthdef = synthdef or supriya.assets.synthdefs.default synth_uuid = self.get("uuid", uuid.uuid4()) is_stop = self.get("is_stop") duration = self.get("duration") if duration is None: duration = 1 if "duration" in settings: duration = settings.pop("duration") dictionaries = self._expand( settings, synthdef, uuids, realtime=False, synth_parameters_only=True ) if synth_uuid not in uuids: # Begin a Pbind or Pmono synth target_node = self["target_node"] if isinstance(target_node, uuid.UUID) and target_node in uuids: target_node = uuids[target_node] prototype = (supriya.nonrealtime.Session, supriya.nonrealtime.Node) if not isinstance(target_node, prototype): target_node = session synths = [] with session.at(offset): for dictionary in dictionaries: synth = target_node.add_synth( add_action=self["add_action"], duration=duration, synthdef=synthdef, **dictionary, ) synths.append(synth) if not is_stop: uuids[synth_uuid] = tuple(synths) else: # Extend and make settings on a Pmono synth synths = uuids[synth_uuid] stop_offset = offset + duration for synth, dictionary in zip(synths, dictionaries): duration = stop_offset - synth.start_offset synth.set_duration(duration) with session.at(offset): for key, value in dictionary.items(): synth[key] = value return offset + max(self.delta, self.get("duration", 0)) def _perform_realtime(self, index=0, server=None, timestamp=0, uuids=None): import supriya.assets.synthdefs import supriya.patterns synth_uuid = self.get("uuid") or uuid.uuid4() synthdef = self.get("synthdef", supriya.assets.synthdefs.default) synthdef = synthdef or supriya.assets.synthdefs.default is_stop = self.get("is_stop") duration = self["duration"] if duration is None: duration = 1 dictionaries = self._expand(self.settings, synthdef, uuids) first_visit = False if synth_uuid not in uuids: first_visit = True node_ids = { server.node_id_allocator.allocate_node_id(): None for _ in range(len(dictionaries)) } uuids[synth_uuid] = node_ids start_product = self._build_start_bundle( dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids ) if self.get("duration"): if is_stop: stop_product = self._build_stop_bundle( index, synth_uuid, synthdef, timestamp, uuids ) else: stop_product = supriya.patterns.EventProduct( event=None, index=index, is_stop=True, requests=(), timestamp=timestamp + duration, uuid=None, ) return [start_product, stop_product] else: uuids.pop(synth_uuid) return [start_product] def _build_start_bundle( self, dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids ): import supriya.patterns requests = [] node_ids = uuids[synth_uuid] if first_visit: for node_id, dictionary in zip(node_ids, dictionaries): add_action = dictionary.pop("add_action") 
target_node = dictionary.pop("target_node") if target_node is None: target_node = 1 synth_kwargs = { key: value for key, value in dictionary.items() if key in synthdef.parameter_names } request = supriya.commands.SynthNewRequest( add_action=add_action, node_id=node_id, synthdef=synthdef, target_node_id=target_node, **synth_kwargs, ) requests.append(request) synth = supriya.realtime.Synth(synthdef) node_ids[node_id] = synth else: for node_id, dictionary in zip(node_ids, dictionaries): synth_kwargs = { key: value for key, value in dictionary.items() if key in synthdef.parameter_names } request = supriya.commands.NodeSetRequest( node_id=node_id, **synth_kwargs ) requests.append(request) event_product = supriya.patterns.EventProduct( event=self, index=index, is_stop=False, requests=requests, timestamp=timestamp, uuid=synth_uuid, ) return event_product def _build_stop_bundle(self, index, synth_uuid, synthdef, timestamp, uuids): import supriya.patterns import supriya.synthdefs duration = self["duration"] if duration is None: duration = 1 requests = [] timestamp = timestamp + duration node_ids = sorted(uuids[synth_uuid]) if synthdef.has_gate: for node_id in node_ids: request = supriya.commands.NodeSetRequest(node_id=node_id, gate=0) requests.append(request) elif any(x >= supriya.DoneAction.FREE_SYNTH for x in synthdef.done_actions): pass else: request = supriya.commands.NodeFreeRequest(node_ids=node_ids) requests.append(request) event_product = supriya.patterns.EventProduct( event=self, index=index, is_stop=True, requests=requests, timestamp=timestamp, uuid=synth_uuid, ) return event_product
35.571429
86
0.545382
7,365
0.985944
0
0
0
0
0
0
333
0.044578
88055aadf736a00daf291c08df0121953d6b59c8
443
py
Python
emoji_utils.py
ApacheAA/LastSeen
1fe675b3ee3072d56e9fe094d1d80e1f7d876215
[ "MIT" ]
null
null
null
emoji_utils.py
ApacheAA/LastSeen
1fe675b3ee3072d56e9fe094d1d80e1f7d876215
[ "MIT" ]
null
null
null
emoji_utils.py
ApacheAA/LastSeen
1fe675b3ee3072d56e9fe094d1d80e1f7d876215
[ "MIT" ]
1
2021-04-04T02:46:10.000Z
2021-04-04T02:46:10.000Z
# unicode digit emojis # digits from '0' to '9' zero_digit_code = zd = 48 # excluded digits excl_digits = [2, 4, 5, 7] # unicode digit keycap udkc = '\U0000fe0f\U000020e3' hours_0_9 = [chr(i) + udkc for i in range(zd, zd + 10) if i - zd not in excl_digits] # number '10' emoji hours_0_9.append('\U0001f51f') # custom emojis from '11' to '23' hours_11_23 = [str(i) for i in range(11, 24)] vote = ('PLUS', 'MINUS') edit = '\U0001F4DD'
26.058824
54
0.654628
0
0
0
0
0
0
0
0
196
0.442438
88059d921ab4392734ab0df3051f19d38efd4fa5
1,131
py
Python
TFBertForMaskedLM/main.py
Sniper970119/ExampleForTransformers
3348525957c38b2a45898d4f4652879933503b25
[ "Apache-2.0" ]
3
2021-01-24T04:55:46.000Z
2021-05-12T15:11:35.000Z
TFBertForMaskedLM/main.py
Sniper970119/ExampleForTransformers
3348525957c38b2a45898d4f4652879933503b25
[ "Apache-2.0" ]
null
null
null
TFBertForMaskedLM/main.py
Sniper970119/ExampleForTransformers
3348525957c38b2a45898d4f4652879933503b25
[ "Apache-2.0" ]
1
2021-01-24T04:55:53.000Z
2021-01-24T04:55:53.000Z
# -*- coding:utf-8 -*- """ ┏┛ ┻━━━━━┛ ┻┓ ┃       ┃ ┃   ━   ┃ ┃ ┳┛  ┗┳ ┃ ┃       ┃ ┃   ┻   ┃ ┃       ┃ ┗━┓   ┏━━━┛ ┃   ┃ 神兽保佑 ┃   ┃ 代码无BUG! ┃   ┗━━━━━━━━━┓ ┃CREATE BY SNIPER┣┓ ┃     ┏┛ ┗━┓ ┓ ┏━━━┳ ┓ ┏━┛ ┃ ┫ ┫ ┃ ┫ ┫ ┗━┻━┛ ┗━┻━┛ """ import tensorflow as tf import numpy as np for gpu in tf.config.experimental.list_physical_devices('GPU'): tf.config.experimental.set_memory_growth(gpu, True) from transformers import BertTokenizer, TFBertForMaskedLM tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True) inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") outputs = model(inputs) logits = outputs.logits output = np.argmax(logits[0][6]) o1 = tokenizer.decode(int(output)) inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf") outputs = model(inputs) logits = outputs.logits output = np.argmax(logits[0][4]) o2 = tokenizer.decode(int(output)) print()
21.75
78
0.546419
0
0
0
0
0
0
0
0
768
0.540464
8805a00d3b1fcbc6ac9137bed25cfb76407c9dfe
663
py
Python
mirari/TCS/migrations/0042_auto_20190726_0145.py
gcastellan0s/mirariapp
24a9db06d10f96c894d817ef7ccfeec2a25788b7
[ "MIT" ]
null
null
null
mirari/TCS/migrations/0042_auto_20190726_0145.py
gcastellan0s/mirariapp
24a9db06d10f96c894d817ef7ccfeec2a25788b7
[ "MIT" ]
18
2019-12-27T19:58:20.000Z
2022-02-27T08:17:49.000Z
mirari/TCS/migrations/0042_auto_20190726_0145.py
gcastellan0s/mirariapp
24a9db06d10f96c894d817ef7ccfeec2a25788b7
[ "MIT" ]
null
null
null
# Generated by Django 2.0.5 on 2019-07-26 06:45 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('TCS', '0041_auto_20190726_0030'), ] operations = [ migrations.AlterModelOptions( name='modelo', options={'default_permissions': [], 'ordering': ['-id'], 'permissions': [('Can_View__Modelo', 'Ve modelos'), ('Can_Create__Modelo', 'Crea modelos'), ('Can_Update__Modelo', 'Modifica modelos'), ('Can_Delete__Modelo', 'Elimina modelos'), ('Can_Change__ModelTCS', 'Modifica modelos de equipo')], 'verbose_name': 'Modelo', 'verbose_name_plural': 'Modelos'}, ), ]
36.833333
365
0.653092
578
0.871795
0
0
0
0
0
0
375
0.565611
88063bdddf555a3761172dbc965029eec4f02090
6,071
py
Python
kornia/geometry/calibration/undistort.py
belltailjp/kornia
cfa3b6823d55e276893847f1c3f06ddf108c606a
[ "ECL-2.0", "Apache-2.0" ]
1
2022-01-06T00:36:04.000Z
2022-01-06T00:36:04.000Z
kornia/geometry/calibration/undistort.py
belltailjp/kornia
cfa3b6823d55e276893847f1c3f06ddf108c606a
[ "ECL-2.0", "Apache-2.0" ]
12
2021-09-26T14:07:49.000Z
2022-03-20T14:08:08.000Z
kornia/geometry/calibration/undistort.py
belltailjp/kornia
cfa3b6823d55e276893847f1c3f06ddf108c606a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import torch from kornia.geometry.linalg import transform_points from kornia.geometry.transform import remap from kornia.utils import create_meshgrid from .distort import distort_points, tilt_projection # Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384 def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor: r"""Compensate for lens distortion a set of 2D image points. Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`, tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function. Args: points: Input image points with shape :math:`(*, N, 2)`. K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`. dist: Distortion coefficients :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`. Returns: Undistorted 2D points with shape :math:`(*, N, 2)`. Example: >>> _ = torch.manual_seed(0) >>> x = torch.rand(1, 4, 2) >>> K = torch.eye(3)[None] >>> dist = torch.rand(1, 4) >>> undistort_points(x, K, dist) tensor([[[-0.1513, -0.1165], [ 0.0711, 0.1100], [-0.0697, 0.0228], [-0.1843, -0.1606]]]) """ if points.dim() < 2 and points.shape[-1] != 2: raise ValueError(f'points shape is invalid. Got {points.shape}.') if K.shape[-2:] != (3, 3): raise ValueError(f'K matrix shape is invalid. Got {K.shape}.') if dist.shape[-1] not in [4, 5, 8, 12, 14]: raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}") # Adding zeros to obtain vector with 14 coeffs. if dist.shape[-1] < 14: dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]]) # Convert 2D points from pixels to normalized camera coordinates cx: torch.Tensor = K[..., 0:1, 2] # princial point in x (Bx1) cy: torch.Tensor = K[..., 1:2, 2] # princial point in y (Bx1) fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1) fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1) # This is equivalent to K^-1 [u,v,1]^T x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN # Compensate for tilt distortion if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0): inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True) # Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1]) x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1) # Iteratively undistort points x0, y0 = x, y for _ in range(5): r2 = x * x + y * y inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / ( 1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3 ) deltaX = ( 2 * dist[..., 2:3] * x * y + dist[..., 3:4] * (r2 + 2 * x * x) + dist[..., 8:9] * r2 + dist[..., 9:10] * r2 * r2 ) deltaY = ( dist[..., 2:3] * (r2 + 2 * y * y) + 2 * dist[..., 3:4] * x * y + dist[..., 10:11] * r2 + dist[..., 11:12] * r2 * r2 ) x = (x0 - deltaX) * inv_rad_poly y = (y0 - deltaY) * inv_rad_poly # Convert points from normalized camera coordinates to pixel coordinates x = fx * x + cx y = fy * y + cy return torch.stack([x, y], -1) # Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287 def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor: r"""Compensate an image for lens distortion. 
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`, tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function. Args: image: Input image with shape :math:`(*, C, H, W)`. K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`. dist: Distortion coefficients :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`. Returns: Undistorted image with shape :math:`(*, C, H, W)`. Example: >>> img = torch.rand(1, 3, 5, 5) >>> K = torch.eye(3)[None] >>> dist_coeff = torch.rand(4) >>> out = undistort_image(img, K, dist_coeff) >>> out.shape torch.Size([1, 3, 5, 5]) """ if len(image.shape) < 2: raise ValueError(f"Image shape is invalid. Got: {image.shape}.") if K.shape[-2:] != (3, 3): raise ValueError(f'K matrix shape is invalid. Got {K.shape}.') if dist.shape[-1] not in [4, 5, 8, 12, 14]: raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.') if not image.is_floating_point(): raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.') B, _, rows, cols = image.shape # Create point coordinates for each pixel of the image xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype) pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates # Distort points and define maps ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2 mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float # Remap image to undistort out = remap(image, mapx, mapy, align_corners=True) return out
39.679739
109
0.567452
0
0
0
0
0
0
0
0
3,308
0.544886
8806b7d99f0084120c35f1e69100c53537ba82bc
422
py
Python
Tests/Aula_7a.py
o-Ian/Practice-Python
1e4b2d0788e70006096a53a7cf038db3148ba4b7
[ "MIT" ]
4
2021-04-23T18:07:58.000Z
2021-05-12T11:38:14.000Z
Tests/Aula_7a.py
o-Ian/Practice-Python
1e4b2d0788e70006096a53a7cf038db3148ba4b7
[ "MIT" ]
null
null
null
Tests/Aula_7a.py
o-Ian/Practice-Python
1e4b2d0788e70006096a53a7cf038db3148ba4b7
[ "MIT" ]
null
null
null
n1 = int(input('Digite um valor: ')) n2 = int(input('Digite outro valor: ')) print('A soma é: {}!' .format(n1+n2)) print('A subtração entre {} e {} é {}!' .format(n1, n2, n1-n2)) print('A multiplicação desses valores é {}!' .format(n1 * n2)) print('A divisão entre {} e {} é {:.3}' .format(n1, n2, n1/n2)) print('A divisão sem restos é {}!' .format(n1//n2), end = ' ') print('O resto dessa divisão é {}' .format(n1 % n2))
46.888889
63
0.601896
0
0
0
0
0
0
0
0
232
0.533333
880784410cfda04eacd518622e54861cdb7a1605
6,288
py
Python
manubot/cite/tests/test_citekey_api.py
shuvro-zz/manubot
9023b7fbfa0b235c14a4d702516bc0cd6d3101ed
[ "BSD-3-Clause" ]
1
2019-11-11T05:17:28.000Z
2019-11-11T05:17:28.000Z
manubot/cite/tests/test_citekey_api.py
shuvro-zz/manubot
9023b7fbfa0b235c14a4d702516bc0cd6d3101ed
[ "BSD-3-Clause" ]
null
null
null
manubot/cite/tests/test_citekey_api.py
shuvro-zz/manubot
9023b7fbfa0b235c14a4d702516bc0cd6d3101ed
[ "BSD-3-Clause" ]
null
null
null
"""Tests API-level functions in manubot.cite. Both functions are found in citekey.py""" import pytest from manubot.cite import citekey_to_csl_item, standardize_citekey @pytest.mark.parametrize( "citekey,expected", [ ("doi:10.5061/DRYAD.q447c/1", "doi:10.5061/dryad.q447c/1"), ("doi:10.5061/dryad.q447c/1", "doi:10.5061/dryad.q447c/1"), ("doi:10/b6vnmd", "doi:10.1016/s0933-3657(96)00367-3"), ("doi:10/B6VNMD", "doi:10.1016/s0933-3657(96)00367-3"), ( "doi:10/xxxxxxxxxxxxxYY", "doi:10/xxxxxxxxxxxxxyy", ), # passthrough non-existent shortDOI ("pmid:24159271", "pmid:24159271"), ("isbn:1339919885", "isbn:9781339919881"), ("isbn:1-339-91988-5", "isbn:9781339919881"), ("isbn:978-0-387-95069-3", "isbn:9780387950693"), ("isbn:9780387950938", "isbn:9780387950938"), ("isbn:1-55860-510-X", "isbn:9781558605107"), ("isbn:1-55860-510-x", "isbn:9781558605107"), ], ) def test_standardize_citekey(citekey, expected): """ Standardize identifiers based on their source """ output = standardize_citekey(citekey) assert output == expected @pytest.mark.xfail(reason="https://twitter.com/dhimmel/status/950443969313419264") def test_citekey_to_csl_item_doi_datacite(): citekey = "doi:10.7287/peerj.preprints.3100v1" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "11cb5HXoY" assert csl_item["URL"] == "https://doi.org/10.7287/peerj.preprints.3100v1" assert csl_item["DOI"] == "10.7287/peerj.preprints.3100v1" assert csl_item["type"] == "report" assert ( csl_item["title"] == "Sci-Hub provides access to nearly all scholarly literature" ) authors = csl_item["author"] assert authors[0]["family"] == "Himmelstein" assert authors[-1]["family"] == "Greene" def test_citekey_to_csl_item_arxiv(): citekey = "arxiv:cond-mat/0703470v2" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "ES92tcdg" assert csl_item["URL"] == "https://arxiv.org/abs/cond-mat/0703470v2" assert csl_item["number"] == "cond-mat/0703470v2" assert csl_item["version"] == "2" assert csl_item["type"] == "report" assert csl_item["container-title"] == "arXiv" assert csl_item["title"] == "Portraits of Complex Networks" authors = csl_item["author"] assert authors[0]["literal"] == "J. P. Bagrow" assert csl_item["DOI"] == "10.1209/0295-5075/81/68004" def test_citekey_to_csl_item_pmc(): """ https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534 """ citekey = f"pmcid:PMC3041534" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "RoOhUFKU" assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041534/" assert csl_item["container-title-short"] == "Summit Transl Bioinform" assert ( csl_item["title"] == "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities" ) authors = csl_item["author"] assert authors[0]["family"] == "Botsis" assert csl_item["PMID"] == "21347133" assert csl_item["PMCID"] == "PMC3041534" assert "generated by Manubot" in csl_item["note"] assert "standard_id: pmcid:PMC3041534" in csl_item["note"] def test_citekey_to_csl_item_pubmed_1(): """ Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full """ citekey = "pmid:21347133" csl_item = citekey_to_csl_item(citekey) assert csl_item["id"] == "y9ONtSZ9" assert csl_item["type"] == "article-journal" assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/21347133" assert csl_item["container-title"] == "Summit on translational bioinformatics" assert ( csl_item["title"] == "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities." 
) assert csl_item["issued"]["date-parts"] == [[2010, 3, 1]] authors = csl_item["author"] assert authors[0]["given"] == "Taxiarchis" assert authors[0]["family"] == "Botsis" assert csl_item["PMID"] == "21347133" assert csl_item["PMCID"] == "PMC3041534" def test_citekey_to_csl_item_pubmed_2(): """ Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full """ citekey = "pmid:27094199" csl_item = citekey_to_csl_item(citekey) print(csl_item) assert csl_item["id"] == "alaFV9OY" assert csl_item["type"] == "article-journal" assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/27094199" assert csl_item["container-title"] == "Circulation. Cardiovascular genetics" assert csl_item["container-title-short"] == "Circ Cardiovasc Genet" assert csl_item["page"] == "179-84" assert ( csl_item["title"] == "Genetic Association-Guided Analysis of Gene Networks for the Study of Complex Traits." ) assert csl_item["issued"]["date-parts"] == [[2016, 4]] authors = csl_item["author"] assert authors[0]["given"] == "Casey S" assert authors[0]["family"] == "Greene" assert csl_item["PMID"] == "27094199" assert csl_item["DOI"] == "10.1161/circgenetics.115.001181" def test_citekey_to_csl_item_pubmed_with_numeric_month(): """ Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full See https://github.com/manubot/manubot/issues/69 """ citekey = "pmid:29028984" csl_item = citekey_to_csl_item(citekey) print(csl_item) assert csl_item["issued"]["date-parts"] == [[2018, 3, 15]] def test_citekey_to_csl_item_pubmed_book(): """ Extracting CSL metadata from books in PubMed is not supported. Logic not implemented to parse XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full """ with pytest.raises(NotImplementedError): citekey_to_csl_item("pmid:29227604") def test_citekey_to_csl_item_isbn(): csl_item = citekey_to_csl_item("isbn:9780387950693") assert csl_item["type"] == "book" assert csl_item["title"] == "Complex analysis"
38.109091
98
0.667144
0
0
0
0
1,707
0.271469
0
0
3,326
0.528944
8808d379a8ce975e29508dea21a42397452fc552
2,489
py
Python
vispy/io/datasets.py
hmaarrfk/vispy
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
[ "BSD-3-Clause" ]
2,617
2015-01-02T07:52:18.000Z
2022-03-29T19:31:15.000Z
vispy/io/datasets.py
hmaarrfk/vispy
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
[ "BSD-3-Clause" ]
1,674
2015-01-01T00:36:08.000Z
2022-03-31T19:35:56.000Z
vispy/io/datasets.py
hmaarrfk/vispy
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
[ "BSD-3-Clause" ]
719
2015-01-10T14:25:00.000Z
2022-03-02T13:24:56.000Z
# -*- coding: utf-8 -*- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. import numpy as np from os import path as op from ..util import load_data_file # This is the package data dir, not the dir for config, etc. DATA_DIR = op.join(op.dirname(__file__), '_data') def load_iris(): """Load the iris dataset Returns ------- iris : NpzFile data['data'] : a (150, 4) NumPy array with the iris' features data['group'] : a (150,) NumPy array with the iris' group """ return np.load(load_data_file('iris/iris.npz', force_download='2014-09-04')) def load_crate(): """Load an image of a crate Returns ------- crate : array 256x256x3 crate image. """ return np.load(load_data_file('orig/crate.npz'))['crate'] def pack_unit(value): """Packs float values between [0,1] into 4 unsigned int8 Returns ------- pack: array packed interpolation kernel """ pack = np.zeros(value.shape + (4,), dtype=np.ubyte) for i in range(4): value, pack[..., i] = np.modf(value * 256.) return pack def pack_ieee(value): """Packs float ieee binary representation into 4 unsigned int8 Returns ------- pack: array packed interpolation kernel """ return np.fromstring(value.tobytes(), np.ubyte).reshape((value.shape + (4,))) def load_spatial_filters(packed=True): """Load spatial-filters kernel Parameters ---------- packed : bool Whether or not the data should be in "packed" representation for use in GLSL code. Returns ------- kernel : array 16x1024x4 (packed float in rgba) or 16x1024 (unpacked float) 16 interpolation kernel with length 1024 each. names : tuple of strings Respective interpolation names, plus "Nearest" which does not require a filter but can still be used """ names = ("Bilinear", "Hanning", "Hamming", "Hermite", "Kaiser", "Quadric", "Bicubic", "CatRom", "Mitchell", "Spline16", "Spline36", "Gaussian", "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest") kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy')) if packed: # convert the kernel to a packed representation kernel = pack_unit(kernel) return kernel, names
26.2
73
0.60225
0
0
0
0
0
0
0
0
1,612
0.64765
8809a9e20076798a2ad0ec40dc57152d0a032e41
13,731
py
Python
universal_portfolio/knapsack.py
jehung/universal_portfolio
de731a6166ff057c8d6f3f73f80f9aca151805fa
[ "CC-BY-3.0" ]
14
2017-03-01T07:54:17.000Z
2021-10-10T11:07:56.000Z
universal_portfolio/knapsack.py
jehung/universal_portfolio
de731a6166ff057c8d6f3f73f80f9aca151805fa
[ "CC-BY-3.0" ]
null
null
null
universal_portfolio/knapsack.py
jehung/universal_portfolio
de731a6166ff057c8d6f3f73f80f9aca151805fa
[ "CC-BY-3.0" ]
3
2017-06-27T10:18:03.000Z
2020-07-03T01:29:56.000Z
# -*- coding: utf-8 -*- from __future__ import print_function import numpy as np np.random.seed(1335) # for reproducibility np.set_printoptions(precision=5, suppress=True, linewidth=150) import os import pandas as pd import backtest as twp from matplotlib import pyplot as plt from sklearn import metrics, preprocessing from talib.abstract import * from sklearn.externals import joblib import quandl import random, timeit from sklearn import preprocessing from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.layers.recurrent import LSTM from keras.optimizers import RMSprop, Adam ''' Name: The Self Learning Quant, Example 3 Author: Daniel Zakrisson Created: 30/03/2016 Copyright: (c) Daniel Zakrisson 2016 Licence: BSD Requirements: Numpy Pandas MatplotLib scikit-learn TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html Keras, https://keras.io/ Quandl, https://www.quandl.com/tools/python backtest.py from the TWP library. Download backtest.py and put in the same folder /plt create a subfolder in the same directory where plot files will be saved ''' def get_ticker(x): return x.split('/')[-1].split('.')[0] def read_file(file, test=None): scaler = preprocessing.MinMaxScaler() d = pd.read_csv(file).set_index('Date') d.fillna(0, inplace=True) ticker = get_ticker(file) d['ticker'] = ticker d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close', 'Volume (BTC)': 'volume'}, inplace=True) x_train = d.iloc[:-100, ] x_test = d.iloc[-100:, ] if test: return x_test, ticker else: return x_train, ticker # Initialize first state, all items are placed deterministically def init_state(file, test): d, ticker = read_file(file, test=test) xdata = pd.DataFrame() scaler = preprocessing.StandardScaler() xdata['adj_close'] = d['adj_close'] # .values xdata['diff'] = xdata['adj_close'].diff(periods=1) xdata['diff'].fillna(0, inplace=True) xdata['sma15'] = SMA(d, timeperiod=15) xdata['sma60'] = SMA(d, timeperiod=60) xdata['rsi'] = RSI(d, timeperiod=14) xdata['atr'] = ATR(d, timeperiod=14) xdata.fillna(0, inplace=True) # --- Preprocess data # xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr)) xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns) xdata['ticker'] = ticker pivot_columns = xdata.columns[0:-1] pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()] return pivot def all_init_data(test=False): filepath = 'util/stock_dfs/' all = [] scaler = preprocessing.StandardScaler() for f in os.listdir(filepath): datapath = os.path.join(filepath, f) if datapath.endswith('.csv'): # print(datapath) Res = init_state(datapath, test=test) all.append(Res) all = pd.concat(all, axis=1) all.fillna(0, inplace=True) closecol = [col for col in all.columns if 'adj_close' in col] close = all[closecol].values # xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr)) xdata = np.vstack(all.values) xdata = np.nan_to_num(xdata) if test == False: scaler = preprocessing.StandardScaler() xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1) joblib.dump(scaler, 'data/scaler.pkl') else: scaler = joblib.load('data/scaler.pkl') xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1) state = xdata[0:1, 0:1, :] return state, xdata, close # Take Action def take_action(state, xdata, action, signal, time_step): # this should 
generate a list of trade signals that at evaluation time are fed to the backtester # the backtester should get a list of trade signals and a list of price data for the assett # make necessary adjustments to state and then return it time_step += 1 # if the current iteration is the last state ("terminal state") then set terminal_state to 1 if time_step + 1 == xdata.shape[0]: state = xdata[time_step - 1:time_step, 0:1, :] terminal_state = 1 signal.loc[time_step] = 0 return state, time_step, signal, terminal_state # move the market data window one step forward state = xdata[time_step - 1:time_step, 0:1, :] # take action if action == 1: signal.loc[time_step] = 100 elif action == 2: signal.loc[time_step] = -100 else: signal.loc[time_step] = 0 # print(state) terminal_state = 0 # print(signal) return state, time_step, signal, terminal_state # Get Reward, the reward is returned at the end of an episode def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0): reward = 0 signal.fillna(value=0, inplace=True) if eval == False: try: bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values), signal[time_step - 2:time_step], signalType='shares') reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1]) except: pass if terminal_state == 1 and eval == True: bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares') reward = bt.pnl.iloc[-1] plt.figure(figsize=(9, 16)) bt.plotTrades() plt.axvline(x=400, color='black', linestyle='--') plt.text(250, 400, 'training data') plt.text(450, 400, 'test data') plt.suptitle(str(epoch)) plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png') plt.close('all') ''' # save a figure of the test set plt.figure(figsize=(10, 25)) for i in range(xdata.T.shape[0]): #frame = pd.concat(btFrame, axis=1) bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares') reward += np.max(bt.pnl.iloc[-1]) bt.plotTrades() #plt.axvline(x=400, color='black', linestyle='--') #plt.text(250, 400, 'training data') #plt.text(450, 400, 'test data') #plt.suptitle(str(epoch)) plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72) plt.close('all') ''' # print(time_step, terminal_state, eval, reward) return reward def evaluate_Q(eval_data, eval_model, epoch=0): # This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions signal = pd.Series(index=np.arange(len(eval_data))) state, xdata, price_data = all_init_data() status = 1 terminal_state = 0 time_step = 1 while (status == 1): # We start in state S qval = eval_model.predict(state, batch_size=batch_size) action = (np.argmax(qval)) # Take action, observe new state S' new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step) # Observe reward eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True, epoch=epoch) state = new_state if terminal_state == 1: # terminal state status = 0 return eval_reward if __name__ == "__main__": # This neural network is the the Q-function, run it like this: # model.predict(state.reshape(1,64), batch_size=1) batch_size = 7 num_features = 2544 epochs = 3 gamma = 0.95 # since the reward can be several time steps away, make gamma high epsilon = 1 batchSize = 100 buffer = 
200 replay = [] learning_progress = [] model = Sequential() model.add(LSTM(64, input_shape=(1, num_features), return_sequences=True, stateful=False)) model.add(Dropout(0.5)) model.add(LSTM(64, input_shape=(1, num_features), return_sequences=False, stateful=False)) model.add(Dropout(0.5)) model.add(Dense(4, init='lecun_uniform')) model.add(Activation('linear')) # linear output so we can have range of real-valued outputs rms = RMSprop() adam = Adam() model.compile(loss='mse', optimizer=adam) start_time = timeit.default_timer() # read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle astate, xdata, aprice_data = all_init_data() bstate, test_data, test_price_data = all_init_data(test=True) ''' bstate, test_data, test_price_data = all_init_data(test=True) print(astate.shape) print(bstate.shape) print(xdata.shape) print(test_data.shape) print(price_data.shape) print(test_price_data.shape) ''' # stores tuples of (S, A, R, S') h = 0 # signal = pd.Series(index=market_data.index) signal = pd.Series(index=np.arange(len(xdata))) for i in range(epochs): if i == epochs - 1: # the last epoch, use test data set state, xdata, price_data = all_init_data() else: state, xdata, price_data = all_init_data(test=True) status = 1 terminal_state = 0 time_step = 5 # while game still in progress while (status == 1): # We are in state S # Let's run our Q function on S to get Q values for all possible actions print('epoch ' + str(i)) qval = model.predict(state, batch_size=batch_size) if (random.random() < epsilon): # choose random action action = np.random.randint(0, 4) # assumes 4 different actions else: # choose best action from Q(s,a) values action = (np.argmax(qval)) # Take action, observe new state S' new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step) # Observe reward reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state) print('new_state', new_state) print('reward', reward) # Experience replay storage if (len(replay) < buffer): # if buffer not filled, add to it replay.append((state, action, reward, new_state)) # print(time_step, reward, terminal_state) else: # if buffer full, overwrite old values if (h < (buffer - 1)): h += 1 else: h = 0 replay[h] = (state, action, reward, new_state) # randomly sample our experience replay memory minibatch = random.sample(replay, batchSize) X_train = [] y_train = [] for memory in minibatch: # Get max_Q(S',a) old_state, action, reward, new_state = memory old_qval = model.predict(old_state, batch_size=batch_size) newQ = model.predict(new_state, batch_size=batch_size) maxQ = np.max(newQ) y = np.zeros((1, 4)) y[:] = old_qval[:] if terminal_state == 0: # non-terminal state update = (reward + (gamma * maxQ)) else: # terminal state update = reward # print('rewardbase', reward) # print('update', update) y[0][action] = update # print(time_step, reward, terminal_state) X_train.append(old_state) y_train.append(y.reshape(4, )) X_train = np.squeeze(np.array(X_train), axis=(1)) y_train = np.array(y_train) model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0) state = new_state if terminal_state == 1: # if reached terminal state, update epoch status status = 0 eval_reward = evaluate_Q(test_data, model, i) # eval_reward = value_iter(test_data, epsilon, epochs) learning_progress.append(eval_reward) print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon)) # learning_progress.append((reward)) if epsilon > 0.1: # decrement epsilon over time epsilon -= 
(1.0 / epochs) elapsed = np.round(timeit.default_timer() - start_time, decimals=2) print("Completed in %f" % (elapsed,)) bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares') bt.data['delta'] = bt.data['shares'].diff().fillna(0) print(bt.data) bt.data.to_csv('plt/knapsack_data.csv') unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True) print(np.asarray((unique, counts)).T) plt.figure() plt.subplot(3, 1, 1) bt.plotTrades() plt.subplot(3, 1, 2) bt.pnl.plot(style='x-') plt.subplot(3, 1, 3) plt.plot(learning_progress) print('to plot', learning_progress) plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72) plt.show()
36.134211
145
0.610516
0
0
0
0
0
0
0
0
4,354
0.317093
880a98e6cfdd279e5621d17d6384a4912cab6353
7,165
py
Python
experiments/experiment_01.py
bask0/q10hybrid
9b18af9dd382c65dd667139f97e7da0241091a2c
[ "Apache-2.0" ]
2
2021-05-05T13:37:58.000Z
2021-05-05T15:11:07.000Z
experiments/experiment_01.py
bask0/q10hybrid
9b18af9dd382c65dd667139f97e7da0241091a2c
[ "Apache-2.0" ]
null
null
null
experiments/experiment_01.py
bask0/q10hybrid
9b18af9dd382c65dd667139f97e7da0241091a2c
[ "Apache-2.0" ]
1
2021-11-23T18:13:08.000Z
2021-11-23T18:13:08.000Z
import pytorch_lightning as pl import optuna import xarray as xr from pytorch_lightning.callbacks.early_stopping import EarlyStopping from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint import os import shutil from argparse import ArgumentParser from datetime import datetime from project.fluxdata import FluxData from models.hybrid import Q10Model # Hardcoded `Trainer` args. Note that these cannot be passed via cli. TRAINER_ARGS = dict( max_epochs=100, log_every_n_steps=1, weights_summary=None ) class Objective(object): def __init__(self, args): self.args = args def __call__(self, trial: optuna.trial.Trial) -> float: q10_init = trial.suggest_float('q10_init', 0.0001, 1000.) seed = trial.suggest_int('seed', 0, 999999999999) use_ta = trial.suggest_categorical('use_ta', [True, False]) dropout = trial.suggest_float('dropout', 0.0, 1.0) if use_ta: features = ['sw_pot', 'dsw_pot', 'ta'] else: features = ['sw_pot', 'dsw_pot'] pl.seed_everything(seed) # Further variables used in the hybrid model. physical = ['ta'] # Target (multiple targets not possible currently). targets = ['reco'] # Find variables that are only needed in physical model but not in NN. physical_exclusive = [v for v in physical if v not in features] # ------------ # data # ------------ ds = xr.open_dataset(self.args.data_path) fluxdata = FluxData( ds, features=features + physical_exclusive, targets=targets, context_size=1, train_time=slice('2003-01-01', '2006-12-31'), valid_time=slice('2007-01-01', '2007-12-31'), test_time=slice('2008-01-01', '2008-12-31'), batch_size=self.args.batch_size, data_loader_kwargs={'num_workers': 4}) train_loader = fluxdata.train_dataloader() val_loader = fluxdata.val_dataloader() test_loader = fluxdata.test_dataloader() # Create empty xr.Dataset, will be used by the model to save predictions every epoch. max_epochs = TRAINER_ARGS['max_epochs'] ds_pred = fluxdata.target_xr('valid', varnames=['reco', 'rb'], num_epochs=max_epochs) # ------------ # model # ------------ model = Q10Model( features=features, targets=targets, norm=fluxdata._norm, ds=ds_pred, q10_init=q10_init, hidden_dim=self.args.hidden_dim, num_layers=self.args.num_layers, learning_rate=self.args.learning_rate, dropout=dropout, weight_decay=self.args.weight_decay, num_steps=len(train_loader) * max_epochs) # ------------ # training # ------------ trainer = pl.Trainer.from_argparse_args( self.args, default_root_dir=self.args.log_dir, **TRAINER_ARGS, callbacks=[ EarlyStopping( monitor='valid_loss', patience=10, min_delta=0.00001), ModelCheckpoint( filename='{epoch}-{val_loss:.2f}', save_top_k=1, verbose=False, monitor='valid_loss', mode='min', prefix=model.__class__.__name__) ]) trainer.fit(model, train_loader, val_loader) # ------------ # testing # ------------ # trainer.test(test_dataloaders=test_loader) # ------------ # save results # ------------ # Store predictions. ds = fluxdata.add_scalar_record(model.ds, varname='q10', x=model.q10_history) trial.set_user_attr('q10', ds.q10[-1].item()) # Add some attributes that are required for analysis. ds.attrs = { 'created': datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'author': '[email protected]', 'q10_init': q10_init, 'dropout': dropout, 'use_ta': int(use_ta), 'loss': trainer.callback_metrics['valid_loss'].item() } ds = ds.isel(epoch=slice(0, trainer.current_epoch + 1)) # Save data. 
save_dir = os.path.join(model.logger.log_dir, 'predictions.nc') print(f'Saving predictions to: {save_dir}') ds.to_netcdf(save_dir) return trainer.callback_metrics['valid_loss'].item() @staticmethod def add_project_specific_args(parent_parser: ArgumentParser) -> ArgumentParser: parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument( '--batch_size', default=240, type=int) parser.add_argument( '--data_path', default='./data/Synthetic4BookChap.nc', type=str) parser.add_argument( '--log_dir', default='./logs/experiment_01/', type=str) return parser def main(parser: ArgumentParser = None, **kwargs): """Use kwargs to overload argparse args.""" # ------------ # args # ------------ if parser is None: parser = ArgumentParser() parser = Objective.add_project_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) parser = Q10Model.add_model_specific_args(parser) parser.add_argument('--create_study', action='store_true', help='create new study (deletes old) and exits') parser.add_argument('--single_seed', action='store_true', help='use only one seed instead of (1, ..., 10).') args = parser.parse_args() globargs = TRAINER_ARGS.copy() globargs.update(kwargs) for k, v in globargs.items(): setattr(args, k, v) # ------------ # study setup # ------------ search_space = { 'q10_init': [0.5, 1.5, 2.5], 'seed': [0] if args.single_seed else [i for i in range(10)], 'dropout': [0.0, 0.2, 0.4, 0.6], 'use_ta': [True, False] } sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db")) sql_path = f'sqlite:///{sql_file}' if args.create_study | (not os.path.isfile(sql_file)): if os.path.isdir(args.log_dir): shutil.rmtree(args.log_dir) os.makedirs(args.log_dir, exist_ok=True) study = optuna.create_study( study_name="q10hybrid", storage=sql_path, sampler=optuna.samplers.GridSampler(search_space), direction='minimize', load_if_exists=False) if args.create_study: return None if not os.path.isdir(args.log_dir): os.makedirs(args.log_dir) # ------------ # run study # ------------ n_trials = 1 for _, v in search_space.items(): n_trials *= len(v) study = optuna.load_study( study_name="q10hybrid", storage=sql_path, sampler=optuna.samplers.GridSampler(search_space)) study.optimize(Objective(args), n_trials=n_trials) if __name__ == '__main__': main()
32.130045
112
0.579204
4,563
0.636846
0
0
475
0.066294
0
0
1,560
0.217725
880bad578d9944f1ec06e580824fc923f1978b8e
2,886
py
Python
main.py
warifp/InstagramPostAndDelete
d22577325eccf42e629cef076ab43f7788587bc4
[ "MIT" ]
4
2019-06-03T04:00:51.000Z
2021-11-09T21:34:38.000Z
main.py
nittaya1990/InstagramPostAndDelete
d22577325eccf42e629cef076ab43f7788587bc4
[ "MIT" ]
null
null
null
main.py
nittaya1990/InstagramPostAndDelete
d22577325eccf42e629cef076ab43f7788587bc4
[ "MIT" ]
4
2019-10-30T19:44:08.000Z
2021-09-07T16:30:09.000Z
#! @@Author : WAHYU ARIF PURNOMO #! @@Create : 18 Januari 2019 #! @@Modify : 19 Januari 2019 #! Gambar dari reddit. #! Gunakan VPN karena DNS situs reddit sudah di blokir dari negara Indonesia. import os import json import requests import progressbar from PIL import Image from lxml import html from time import sleep from ImageDeleter import delete_png from InstagramAPI import InstagramAPI InstagramAPI = InstagramAPI(input("Username: "), input("Password: ")) while True: if (InstagramAPI.login()): break else: for x in range(300): os.system('cls') print(300-x) sleep(1) global useable useable = [] os.system('pause') def get_image(): print("Memulai mendapatkan gambar ..") json_raw = requests.get('https://www.reddit.com/r/me_irl/new/.json', headers = {'User-agent': 'Image_Testing_V3'}).json() json_data = json_raw['data'] json_children = json_data['children'] for x in range(len(json_children)): json_current = json_children[x] json_current_data = json_current['data'] json_current_url = json_current_data['url'] if "https://i.redd.it/" not in json_current_url: pass else: if json_current_url not in useable: useable.append(json_current_url) download() else: pass def download(): print("Memulai download ..") global filename new_filename = "" filename = useable[-1] filename = filename.replace("https://i.redd.it/", "") print(filename) f = open(filename, 'wb') f.write(requests.get(useable[-1]).content) f.close() if (filename[-3] + filename[-2] + filename[-1]) != 'jpg': im = Image.open(filename) for x in range(len(filename)-3): new_filename = new_filename + filename[x] im = im.convert("RGB") im.save("edit" + new_filename + 'jpg') new_filename = "edit" + new_filename + "jpg" print(new_filename) else: new_filename = filename upload(new_filename) def delete_image(bad_file): print("Memulai menghapus gambar ..") if (bad_file[0] + bad_file[1] + bad_file[2] + bad_file[3]) == "edit": png_bad_file = '' for x in range(len(bad_file)-3): png_bad_file = png_bad_file + bad_file[x] png_bad_file = png_bad_file + "png" try: os.remove(png_bad_file) except Exception as e: pass os.remove(bad_file) delete_png() print("Selesai.") wait() def upload(file): print("Memulai upload ..") caption = "" InstagramAPI.uploadPhoto(file, caption=caption) delete_image(file) def wait(): for i in progressbar.progressbar(range(1800)): sleep(1) while True: get_image() print("Gambar sukses di upload.") sleep(5) os.system('pause')
28.574257
125
0.615731
0
0
0
0
0
0
0
0
563
0.19508
880bba102de2d9226a037a90ff3d98814009f0c2
2,549
py
Python
pyspectator/collection.py
maximilionus/pyspectator-x
1265f1f39e7ca0534f9e6ffcd7087f2ebced3397
[ "BSD-3-Clause" ]
39
2017-02-27T15:21:21.000Z
2021-12-31T03:23:43.000Z
pyspectator/collection.py
maximilionus/pyspectator-x
1265f1f39e7ca0534f9e6ffcd7087f2ebced3397
[ "BSD-3-Clause" ]
18
2017-07-09T00:16:28.000Z
2021-12-03T21:01:38.000Z
pyspectator/collection.py
maximilionus/pyspectator-x
1265f1f39e7ca0534f9e6ffcd7087f2ebced3397
[ "BSD-3-Clause" ]
25
2017-03-05T07:59:34.000Z
2021-12-15T15:22:58.000Z
# Note: MutableMapping and Container moved to collections.abc (import from
# `collections` fails on Python 3.10+), so the import below is updated.
from collections.abc import MutableMapping, Container
from datetime import datetime, timedelta
from pyvalid import accepts


class LimitedTimeTable(MutableMapping, Container):

    def __init__(self, time_span):
        self.__storage = dict()
        self.__time_span = None
        self.time_span = time_span

    @property
    def time_span(self):
        return self.__time_span

    @time_span.setter
    @accepts(object, timedelta)
    def time_span(self, value):
        self.__time_span = value

    @property
    def oldest(self):
        value = None
        if self.__len__() > 0:
            value = min(self.__storage.keys())
        return value

    @property
    def newest(self):
        value = None
        if self.__len__() > 0:
            value = max(self.__storage.keys())
        return value

    def oldest_keys(self, size):
        for key in self.__get_slice(0, size):
            yield key

    def oldest_values(self, size):
        for key in self.oldest_keys(size):
            yield self.__storage.get(key)

    def oldest_items(self, size):
        for key in self.oldest_keys(size):
            yield (key, self.__storage.get(key))

    def newest_keys(self, size):
        for key in self.__get_slice(-size, None):
            yield key

    def newest_values(self, size):
        for key in self.newest_keys(size):
            yield self.__storage.get(key)

    def newest_items(self, size):
        for key in self.newest_keys(size):
            yield (key, self.__storage.get(key))

    def __get_slice(self, start, end):
        keys = sorted(self.keys())
        return keys[start:end]

    def __getitem__(self, item):
        return self.__storage.__getitem__(item)

    @accepts(object, datetime, object)
    def __setitem__(self, key, value):
        now = datetime.now()
        if key > now:
            raise ValueError('Can\'t set item from future!')
        oldest = self.oldest
        if (oldest is not None) and (oldest != key):
            longest_time_span = now - oldest
            # Item is too old for current timetable
            if longest_time_span >= self.time_span:
                self.__delitem__(oldest)
        return self.__storage.__setitem__(key, value)

    def __delitem__(self, key):
        return self.__storage.__delitem__(key)

    def __len__(self):
        return self.__storage.__len__()

    def __iter__(self):
        return self.__storage.__iter__()

    def __contains__(self, item):
        return self.__storage.__contains__(item)


__all__ = ['LimitedTimeTable']
27.117021
60
0.617497
2,394
0.939192
668
0.262064
993
0.389565
0
0
87
0.034131
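A minimal usage sketch for the LimitedTimeTable record above, added for illustration only: it assumes the pyspectator package (and its pyvalid dependency) is importable under the path listed in the record, and that keys are datetime instances as the @accepts decorators require. Inserting a fresh timestamp evicts an entry that has aged beyond time_span.

from datetime import datetime, timedelta

from pyspectator.collection import LimitedTimeTable

table = LimitedTimeTable(time_span=timedelta(seconds=30))
table[datetime.now() - timedelta(seconds=45)] = 'stale sample'  # already older than the span
table[datetime.now()] = 'fresh sample'                          # this insert evicts the stale entry
print(len(table))    # 1 -- only the fresh sample remains
print(table.newest)  # timestamp of the fresh sample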
880be95fb023fa99a8e4f0737f4b060a1751c3cd
576
py
Python
keyboardrow.py
AndySamoil/Elite_Code
7dc3b7b1b8688c932474f8a10fd2637fd2918bdd
[ "MIT" ]
null
null
null
keyboardrow.py
AndySamoil/Elite_Code
7dc3b7b1b8688c932474f8a10fd2637fd2918bdd
[ "MIT" ]
null
null
null
keyboardrow.py
AndySamoil/Elite_Code
7dc3b7b1b8688c932474f8a10fd2637fd2918bdd
[ "MIT" ]
null
null
null
from typing import List  # needed for the List[...] annotations below


class Solution:  # wrapper added so the LeetCode-style method with `self` is valid
    def findWords(self, words: List[str]) -> List[str]:
        '''
        sets and iterate through sets
        '''
        every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
        ans = []
        for word in words:
            l = len(word)
            for sett in every:
                count = 0
                for let in word:
                    if let.lower() in sett:
                        count += 1
                if count == l:
                    ans.append(word)
        return ans
27.428571
69
0.362847
0
0
0
0
0
0
0
0
78
0.135417
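A short driver for the findWords record above, shown only as a usage example; it assumes the Solution wrapper class from the record is in scope. Words whose letters all come from one keyboard row are returned in input order.

if __name__ == "__main__":
    s = Solution()
    print(s.findWords(["Hello", "Alaska", "Dad", "Peace"]))  # ['Alaska', 'Dad']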
880bf5d9dd1fda0ba4fc9eafcb000337f1273e4d
1,673
py
Python
DFS_Backtracking/31. Next Permutation.py
xli1110/LC
3c18b8809c5a21a62903060eef659654e0595036
[ "MIT" ]
2
2021-04-02T11:57:46.000Z
2021-04-02T11:57:47.000Z
DFS_Backtracking/31. Next Permutation.py
xli1110/LC
3c18b8809c5a21a62903060eef659654e0595036
[ "MIT" ]
null
null
null
DFS_Backtracking/31. Next Permutation.py
xli1110/LC
3c18b8809c5a21a62903060eef659654e0595036
[ "MIT" ]
null
null
null
class Solution:
    def __init__(self):
        self.res = []
        self.path = []

    def arr_to_num(self, arr):
        s = ""
        for x in arr:
            s += str(x)
        return int(s)

    def find_position(self, nums):
        for i in range(len(self.res)):
            if self.res[i] == nums:
                if i == len(self.res) - 1:
                    return 0
                # we need the check below for duplicate elements in nums
                # run nums = [1, 5, 1] and see the case
                next_num = self.arr_to_num(self.res[i + 1])
                if next_num > self.arr_to_num(nums):
                    return i + 1
        raise Exception("The permutation function has something wrong, please debug it.")

    def DFS(self, arr):
        if not arr:
            self.res.append(self.path[:])
            return
        for i in range(len(arr)):
            self.path.append(arr[i])
            self.DFS(arr[:i] + arr[i + 1:])
            self.path.pop()

    def nextPermutation(self, nums: [int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        if not nums:
            raise Exception("Empty Array")

        # all permutations
        # note that we need to SORT the array at first
        arr = nums[:]
        arr.sort()
        self.DFS(arr)

        # find position
        position = self.find_position(nums)

        # in-place replacement
        for i in range(len(nums)):
            nums[i] = self.res[position][i]


if __name__ == "__main__":
    sol = Solution()
    # nums = [2, 1, 3]
    nums = [1, 5, 1]
    sol.nextPermutation(nums)
    print(sol.res)
26.140625
89
0.499701
1,528
0.913329
0
0
0
0
0
0
380
0.227137
880c149eaa01b78f766f6b8032706b3698b74fbc
1,392
py
Python
plugin/DataExport/extend.py
konradotto/TS
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
[ "Apache-2.0" ]
125
2015-01-22T05:43:23.000Z
2022-03-22T17:15:59.000Z
plugin/DataExport/extend.py
konradotto/TS
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
[ "Apache-2.0" ]
59
2015-02-10T09:13:06.000Z
2021-11-11T02:32:38.000Z
plugin/DataExport/extend.py
konradotto/TS
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
[ "Apache-2.0" ]
98
2015-01-17T01:25:10.000Z
2022-03-18T17:29:42.000Z
#!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved

import subprocess
import re

pluginName = 'DataExport'
pluginDir = ""

networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)


def test(bucket):
    return bucket


def runProcess(exe):
    p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return iter(p.stdout.readline, b'')


def runProcessAndReturnLastLine(exe):
    p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return p.stdout.readlines()[-1]


def backupDevices(bucket):
    devices = ""
    cmd = "mount -l -t " + supportedFS
    for line in runProcess(cmd.split()):
        line_arr = line.split()
        folder = line_arr[2]
        fstype = line_arr[4]
        perms = line_arr[5]
        if perms.find('w') != -1:
            use = True
            if fstype in localFS:
                m = re.match('^(/media|/mnt)', folder)
                if not m:
                    use = False
            if use:
                cmd2 = "df -h %s " % folder
                df = runProcessAndReturnLastLine(cmd2.split())
                avail = df.split()[2]
                devices = devices + "<OPTION VALUE=\"" + folder + "\">" + folder + " (" + avail + " free, " + fstype + ")</option>"
    return devices
27.84
131
0.569684
0
0
0
0
0
0
0
0
246
0.176724
880c1d871834c4fa9a80907f77053c53af975688
5,205
py
Python
boids/biods_object.py
PaulAustin/sb7-pgz
fca3e50132b9d1894fb348b2082e83ce7b937b19
[ "MIT" ]
1
2022-02-21T15:54:01.000Z
2022-02-21T15:54:01.000Z
boids/biods_object.py
PaulAustin/sb7-pgz
fca3e50132b9d1894fb348b2082e83ce7b937b19
[ "MIT" ]
null
null
null
boids/biods_object.py
PaulAustin/sb7-pgz
fca3e50132b9d1894fb348b2082e83ce7b937b19
[ "MIT" ]
2
2020-11-21T16:34:22.000Z
2021-01-27T10:30:34.000Z
# Ported from JavaSript version to Python and Pygame Zero # Designed to work well with mu-editor environment. # # The original Javascript version wasdonw by Ben Eater # at https://github.com/beneater/boids (MIT License) # No endorsement implied. # # Complex numbers are are used as vectors to integrate x and y positions and velocities # MIT licesense (details in parent directory) import random import time HEIGHT = 500 # window height WIDTH = 900 # window width MARGIN = 150 # disstance to start avoid edge NUM_BOIDS = 75 VISUAL_RANGE = 70 # radius of influence for most algoriths SPEED_LIMIT_UPPER = 13 # boids canonly fly so fast. SPEED_LIMIT_LOWER = 3 # boid will fall if flying too slow SPEED_INIT = 20 # range for random velocity MIN_DISTANCE = 10 # the distance to stay away from other boids AVOID_FACTOR = 0.05 # % location change if too close CENTERING_FACTOR = 0.050 # % location change to pull to center MATCHING_FACTOR = 0.015 # % velocity change if close MARGIN_FACTOR = 0.25+0.0j # rate of turning away from edge HISTORY_LENGTH = 30 BACK_COLOR = (0, 0, 90) BOID_COLOR = (255, 128, 128) BOID_SIZE = 8 TRAIL_COLOR = (255, 255, 64) g_boids = [] class Boid: def __init__(boid) : boid.loc = complex( (random.randint(0, WIDTH)), (random.randint(0, HEIGHT))) boid.vel = complex( (random.randint(-SPEED_INIT, SPEED_INIT)), (random.randint(-SPEED_INIT, SPEED_INIT))) boid.history = [] def keep_within_bounds(boid) : # Constrain a boid to within the window. If it gets too close to an edge, # nudge it back in and reverse its direction. if (boid.loc.real < MARGIN): boid.vel += MARGIN_FACTOR * 1.0 if (boid.loc.real > WIDTH - MARGIN) : boid.vel += MARGIN_FACTOR * -1.0 if (boid.loc.imag < MARGIN) : boid.vel += MARGIN_FACTOR * 1.0j if (boid.loc.imag > HEIGHT - MARGIN) : boid.vel += MARGIN_FACTOR * -1.0j def fly_towards_center(boid): # Find the center of mass of the other boids and # adjust velocity slightly to point towards the # center of mass. center = 0+0j num_neighbors = 0 for other_boid in g_boids : if abs(boid.loc - other_boid.loc) < VISUAL_RANGE : center += other_boid.loc num_neighbors += 1 if num_neighbors > 0 : center = center / num_neighbors boid.loc += (center - boid.loc) * CENTERING_FACTOR def avoid_others(boid): # Move away from other boids that are too close to avoid colliding move = 0+0j for other_boid in g_boids : if not (other_boid is boid) : if abs(boid.loc - other_boid.loc) < MIN_DISTANCE : move += boid.loc - other_boid.loc boid.vel += move * AVOID_FACTOR def match_velocity(boid): # Find the average velocity (speed and direction) # of the other boids and adjust velocity slightly to match. 
avg_vel = 0+0j num_neighbors = 0 for otherBoid in g_boids: if abs(boid.loc - otherBoid.loc) < VISUAL_RANGE : avg_vel += otherBoid.vel num_neighbors += 1 if num_neighbors > 0: avg_vel /= num_neighbors boid.vel += (avg_vel - boid.vel) * MATCHING_FACTOR def limit_speed(boid): # Speed will naturally vary in flocking behavior, # but real animals can't go arbitrarily fast (or slow) speed = abs(boid.vel) if (speed > SPEED_LIMIT_UPPER) : boid.vel = boid.vel / speed * SPEED_LIMIT_UPPER if (speed < SPEED_LIMIT_LOWER) : boid.vel = boid.vel / speed * SPEED_LIMIT_LOWER return def draw(boid): screen.draw.filled_circle((boid.loc.real, boid.loc.imag), BOID_SIZE, BOID_COLOR) tail = boid.loc + boid.vel * -1.8 screen.draw.line( (boid.loc.real, boid.loc.imag), (tail.real, tail.imag), BOID_COLOR) def draw_trail(boid): pt_from = (boid.loc.real, boid.loc.imag) for p in boid.history: pt_to = (p.real, p.imag) screen.draw.line(pt_from, pt_to, TRAIL_COLOR) pt_from = pt_to def draw(): screen.fill(BACK_COLOR) if keyboard.space: for boid in g_boids: boid.draw_trail() for boid in g_boids: boid.draw() screen.draw.text("space:tails r:restart", (20, 20)) def update(): for boid in g_boids: # Apply rules boid.fly_towards_center() boid.avoid_others() boid.match_velocity() boid.limit_speed() boid.keep_within_bounds() # Update the position based on the current velocity boid.loc += boid.vel boid.history.insert(0, boid.loc) boid.history = boid.history[:HISTORY_LENGTH] def init(): global g_boids g_boids = [Boid() for _ in range(NUM_BOIDS)] def on_key_down(key, mod, unicode): if (key == keys.R): init() init()
31.932515
88
0.602882
3,144
0.604035
0
0
0
0
0
0
1,331
0.255716
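The boids record above leans on one design choice worth spelling out: 2D vectors are stored as Python complex numbers (real part = x, imaginary part = y), so addition, scaling and magnitude come straight from complex arithmetic. A small standalone sketch of that convention, mirroring the limit_speed() pattern:

loc = complex(10, 20)   # position: x=10, y=20
vel = complex(3, -4)    # velocity: vx=3, vy=-4

loc += vel              # integrate one step -> (13+16j)
speed = abs(vel)        # Euclidean length -> 5.0
if speed > 4.5:         # rescale to a maximum speed, as limit_speed() does
    vel = vel / speed * 4.5
print(loc, vel, abs(vel))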
880d1df9e7fa8cda82be2e587cdbae5ea94afb44
4,960
py
Python
upoutdf/types/recurring/yearly.py
UpOut/UpOutDF
5d2f87884565d98b77e25c6a26af7dbea266be76
[ "MIT" ]
null
null
null
upoutdf/types/recurring/yearly.py
UpOut/UpOutDF
5d2f87884565d98b77e25c6a26af7dbea266be76
[ "MIT" ]
null
null
null
upoutdf/types/recurring/yearly.py
UpOut/UpOutDF
5d2f87884565d98b77e25c6a26af7dbea266be76
[ "MIT" ]
null
null
null
# coding: utf-8 import pytz from dateutil.relativedelta import relativedelta from .base import BaseRecurring from upoutdf.occurences import OccurenceBlock, OccurenceGroup from upoutdf.constants import YEARLY_TYPE class YearlyType(BaseRecurring): year_day = None required_attributes = [ 'every', 'timezone', 'starting_time', 'lasting_seconds', 'type', 'starting_date' ] def increment_by(self): return relativedelta(years=+self.every) def _snap_datetime(self,datetime,yearday): if datetime is None: return None snapper = self.snapping_class(self.timezone) return snapper.snap_to_year_day(datetime,yearday) def _canonicalize_date(self,date): if not date.tzinfo: date = date.replace(tzinfo=pytz.utc) if date.tzinfo != self.timezone: date = self.timezone.normalize(date.astimezone(self.timezone)) return date def canonicalize(self): canonical = "every %s year" % self.every if self.year_day is not None: canonical = "%s day %s" % ( canonical, self.year_day ) #(starting <datetimestring>) (ending <datetimestring>) if not self.starting_date_infinite: starting_date = self._canonicalize_date(self.starting_date) canonical = "%s starting %s" % ( canonical, starting_date.strftime("_%m/%d/%Y") ) if not self.ending_date_infinite: ending_date = self._canonicalize_date(self.ending_date) canonical = "%s ending %s" % ( canonical, ending_date.strftime("_%m/%d/%Y") ) if self.repeating_count is not None: canonical = "%s repeating %s times" % ( canonical, self.repeating_count ) starting_time = self._canonicalize_date(self.starting_time) canonical = "%s at %s" % ( canonical, starting_time.strftime("%-I:%M%p") ) canonical = "%s lasting %s seconds in %s" % ( canonical, self.lasting_seconds, str(self.timezone) ) return canonical def occurences(self): if not self.verify_parsed(): raise RuntimeError("Please call parse before calling occurences") ending = self.ending_date repeating_count = self.repeating_count ending_date_infinite = self.ending_date_infinite if repeating_count is not None: ending_date_infinite = False if ending is not None: ending = self._set_start_time(ending) ending = self._strip_microseconds(ending) occurence_start = self.starting_date if self.year_day is not None: try: occurence_start = self._snap_datetime(self.starting_date,self.year_day) except ValueError: #If we had a problem, try the next year occurence_start = self._snap_datetime( self.starting_date+relativedelta(years=+1), self.year_day ) occurence_start = self._set_start_time(occurence_start) occurence_start = self._strip_microseconds(occurence_start) occurence_block = OccurenceBlock( starting_date=occurence_start, ending_date=None, starting_date_infinite=self.starting_date_infinite, ending_date_infinite=ending_date_infinite, typeobj=self ) repeated = 1 occurence_end = None #While we're before the end date (if we have it) #And we're before the max repetetions (if we have it) while ((ending is None or occurence_start <= ending) and (repeating_count is None or repeated <= repeating_count)): occurence_end = self._get_end_datetime(occurence_start) occurence_end = self._strip_microseconds(occurence_end) occurence_block.add_occurence(occurence_start,occurence_end) occurence_start = self._increment_occurence(occurence_start) occurence_start = self._strip_microseconds(occurence_start) repeated+=1 occurence_block.ending_date = occurence_end #We always return a OccurenceGroup, even if just 1 return OccurenceGroup(blocks=[occurence_block]) def _parse_type(self,tokens): if tokens[0] == 'day': tokens = self._step_tokens(tokens) try: self.year_day = int(tokens[0]) 
except ValueError: raise ValueError("Invalid year day") tokens = self._step_tokens(tokens) self.type = YEARLY_TYPE return tokens
30.060606
87
0.594153
4,740
0.955645
0
0
0
0
0
0
547
0.110282
880d94d22915e741e24ad40b49de37d7ad8757e9
625
py
Python
project/urls.py
dbinetti/captable
29769b2b99a3185fda241b3087ccbe621f8c97a2
[ "BSD-2-Clause" ]
18
2016-05-12T18:49:09.000Z
2021-10-05T13:29:09.000Z
project/urls.py
dbinetti/captable
29769b2b99a3185fda241b3087ccbe621f8c97a2
[ "BSD-2-Clause" ]
null
null
null
project/urls.py
dbinetti/captable
29769b2b99a3185fda241b3087ccbe621f8c97a2
[ "BSD-2-Clause" ]
5
2015-08-28T02:50:30.000Z
2019-11-14T04:03:05.000Z
from django.conf.urls import patterns, include, url

from django.contrib import admin
admin.autodiscover()

from django.contrib.staticfiles.urls import staticfiles_urlpatterns

from django.views.generic import TemplateView


urlpatterns = patterns(
    '',
    url(r'^$',
        TemplateView.as_view(template_name='home.html'),
        name='home'),
    url(r'^about/$',
        TemplateView.as_view(template_name='about.html'),
        name='about'),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('apps.captable.urls',)),
)

urlpatterns += staticfiles_urlpatterns()
31.25
85
0.7248
0
0
0
0
0
0
0
0
133
0.2128
880ea7ec7f81ab78d2446766017eac398be3d80f
9,388
py
Python
common/evaluators/bert_emotion_evaluator.py
marjanhs/procon20
c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b
[ "MIT" ]
5
2020-07-12T08:27:47.000Z
2021-10-16T11:40:48.000Z
common/evaluators/bert_emotion_evaluator.py
marjanhs/procon20
c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b
[ "MIT" ]
null
null
null
common/evaluators/bert_emotion_evaluator.py
marjanhs/procon20
c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b
[ "MIT" ]
1
2021-04-12T09:54:37.000Z
2021-04-12T09:54:37.000Z
import warnings import numpy as np import torch import torch.nn.functional as F from sklearn import metrics from torch.utils.data import DataLoader, SequentialSampler, TensorDataset from tqdm import tqdm from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \ convert_examples_to_hierarchical_features from utils.preprocessing import pad_input_matrix from utils.tokenization import BertTokenizer from utils.emotion import Emotion # Suppress warnings from sklearn.metrics warnings.filterwarnings('ignore') class BertEvaluator(object): def __init__(self, model, processor, args, split='dev'): self.args = args self.model = model self.processor = processor self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters) if split == 'test': self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name) elif split == 'dev': self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name) else: self.eval_examples = self.processor.get_any_examples(args.data_dir, split) def get_scores(self, silent=False, return_indices=False): all_indices = [] if self.args.is_hierarchical: eval_features = convert_examples_to_hierarchical_features( self.eval_examples, self.args.max_seq_length, self.tokenizer) else: eval_features = convert_examples_to_features_with_emotion( self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner) unpadded_input_ids = [f.input_ids for f in eval_features] unpadded_input_mask = [f.input_mask for f in eval_features] unpadded_segment_ids = [f.segment_ids for f in eval_features] unpadded_emotion_scores = [f.sentiment_scores for f in eval_features] if self.args.is_hierarchical: pad_input_matrix(unpadded_input_ids, self.args.max_doc_length) pad_input_matrix(unpadded_input_mask, self.args.max_doc_length) pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length) padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long) padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long) padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long) padded_emotion_ids = torch.tensor(unpadded_emotion_scores, dtype=torch.long) label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size) self.model.eval() total_loss = 0 nb_eval_steps, nb_eval_examples = 0, 0 predicted_labels, target_labels = list(), list() for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent): input_ids = input_ids.to(self.args.device) input_mask = input_mask.to(self.args.device) segment_ids = segment_ids.to(self.args.device) emotion_ids = emotion_ids.to(self.args.device) label_ids = label_ids.to(self.args.device) with torch.no_grad(): if return_indices: outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, return_indices=return_indices) else: outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids) if isinstance(outs, tuple): outs, _ = outs if return_indices: logits, indices = outs all_indices.extend(indices.cpu().detach().numpy()) else: logits = outs if self.args.is_multilabel: 
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy()) target_labels.extend(label_ids.cpu().detach().numpy()) loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False) average, average_mac = 'micro', 'macro' else: predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy()) target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy()) loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1)) average, average_mac = 'binary', 'binary' if self.args.n_gpu > 1: loss = loss.mean() if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps total_loss += loss.item() nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels) accuracy = metrics.accuracy_score(target_labels, predicted_labels) precision = metrics.precision_score(target_labels, predicted_labels, average=average) recall = metrics.recall_score(target_labels, predicted_labels, average=average) avg_loss = total_loss / nb_eval_steps hamming_loss = metrics.hamming_loss(target_labels, predicted_labels) jaccard_score = metrics.jaccard_score(target_labels, predicted_labels, average=average) f1_micro = metrics.f1_score(target_labels, predicted_labels, average=average) f1_macro = metrics.f1_score(target_labels, predicted_labels, average=average_mac) if return_indices: return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels, all_indices],\ ['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels', 'all_indices'] else: return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels],\ ['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels'] def get_bert_layers(self, silent=False, last_bert_layers=-1): if self.args.is_hierarchical: eval_features = convert_examples_to_hierarchical_features( self.eval_examples, self.args.max_seq_length, self.tokenizer) else: eval_features = convert_examples_to_features_with_emotion( self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner) unpadded_input_ids = [f.input_ids for f in eval_features] unpadded_input_mask = [f.input_mask for f in eval_features] unpadded_segment_ids = [f.segment_ids for f in eval_features] unpadded_emotion_ids = [f.emotioniment_scores for f in eval_features] if self.args.is_hierarchical: pad_input_matrix(unpadded_input_ids, self.args.max_doc_length) pad_input_matrix(unpadded_input_mask, self.args.max_doc_length) pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length) padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long) padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long) padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long) padded_emotion_ids = torch.tensor(unpadded_emotion_ids, dtype=torch.long) label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size) self.model.eval() bert_layers_l, label_ids_l = [], [] for input_ids, input_mask, 
segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent): input_ids = input_ids.to(self.args.device) input_mask = input_mask.to(self.args.device) segment_ids = segment_ids.to(self.args.device) emotion_ids = emotion_ids.to(self.args.device) label_ids = label_ids.to(self.args.device) with torch.no_grad(): bert_layers = self.model.get_bert_embedding(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, last_bert_layers=last_bert_layers) label_ids = torch.argmax(label_ids, dim=1).cpu().detach().numpy() bert_layers_l.extend(bert_layers) label_ids_l.extend(label_ids) bert_layers_l = torch.stack(bert_layers_l, dim=0) return bert_layers_l, label_ids_l
51.582418
166
0.678526
8,802
0.93758
0
0
0
0
0
0
362
0.03856
881189eb3c68f5eb6d4b3bde9fa97065430d1651
781
py
Python
model/mlp1.py
andrearosasco/DistilledReplay
2a4efa88d22b9afc7016f07549114688f346dbe8
[ "MIT" ]
7
2021-06-27T16:09:13.000Z
2022-03-17T20:02:55.000Z
model/mlp1.py
andrew-r96/DistilledReplay
2a4efa88d22b9afc7016f07549114688f346dbe8
[ "MIT" ]
null
null
null
model/mlp1.py
andrew-r96/DistilledReplay
2a4efa88d22b9afc7016f07549114688f346dbe8
[ "MIT" ]
null
null
null
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        self.drop = nn.Dropout(config['dropout'])
        self.fc1 = nn.Linear(784, 2000)
        self.fc2 = nn.Linear(2000, 2000)
        self.fc3 = nn.Linear(2000, 2000)
        self.fc4 = nn.Linear(2000, 2000)
        self.fc5 = nn.Linear(2000, 10)

    def forward(self, x):
        # 784 -> 2000
        x = F.relu(self.drop(self.fc1(x)))
        # 2000 -> 2000
        x = F.relu(self.drop(self.fc2(x)))
        # 2000 -> 2000
        x = F.relu(self.drop(self.fc3(x)))
        # 2000 -> 2000
        x = F.relu(self.drop(self.fc4(x)))
        # 2000 -> 10
        x = self.fc5(x)
        return x
28.925926
50
0.516005
721
0.923175
0
0
0
0
0
0
82
0.104994
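A minimal forward-pass sketch for the Model record above, added as an illustration: the import path is assumed from the repo path model/mlp1.py, and 'dropout' is the only config key the constructor actually reads.

import torch

from model.mlp1 import Model  # assumed from the path listed in the record

net = Model({'dropout': 0.5})
x = torch.randn(32, 784)      # a batch of 32 flattened 28x28 images
logits = net(x)
print(logits.shape)           # torch.Size([32, 10])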
8811e504a270f2f7246e1ece4241279f011e0643
745
py
Python
netbox/ipam/managers.py
aslafy-z/netbox
a5512dd4c46c005df8752fc330c1382ac22b31ea
[ "Apache-2.0" ]
1
2022-01-25T09:02:56.000Z
2022-01-25T09:02:56.000Z
netbox/ipam/managers.py
aslafy-z/netbox
a5512dd4c46c005df8752fc330c1382ac22b31ea
[ "Apache-2.0" ]
4
2021-06-08T22:29:06.000Z
2022-03-12T00:48:51.000Z
netbox/ipam/managers.py
aslafy-z/netbox
a5512dd4c46c005df8752fc330c1382ac22b31ea
[ "Apache-2.0" ]
null
null
null
from django.db import models

from ipam.lookups import Host, Inet


class IPAddressManager(models.Manager):

    def get_queryset(self):
        """
        By default, PostgreSQL will order INETs with shorter (larger) prefix lengths ahead of those with longer
        (smaller) masks. This makes no sense when ordering IPs, which should be ordered solely by family and host
        address. We can use HOST() to extract just the host portion of the address (ignoring its mask), but we must
        then re-cast this value to INET() so that records will be ordered properly. We are essentially re-casting
        each IP address as a /32 or /128.
        """
        qs = super().get_queryset()
        return qs.order_by(Inet(Host('address')))
41.388889
118
0.689933
676
0.907383
0
0
0
0
0
0
522
0.700671
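The Host and Inet helpers imported from ipam.lookups are not part of this record. A plausible sketch of what they could look like, assuming thin wrappers around Django's Func expression (an assumption for illustration, not the actual NetBox source):

from django.db.models import Func


class Host(Func):
    function = 'HOST'  # PostgreSQL HOST(inet): host address without the mask


class Inet(Func):
    function = 'INET'  # cast back to inet so ordering is numeric, not textual


# With these, qs.order_by(Inet(Host('address'))) orders by INET(HOST("address")).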
881335d234ca66e078e1413e1e2269e82e80ed06
5,709
py
Python
train.py
VArdulov/learning-kis
2637f08d5e8027a22feff17064be45ea51f738e5
[ "MIT" ]
null
null
null
train.py
VArdulov/learning-kis
2637f08d5e8027a22feff17064be45ea51f738e5
[ "MIT" ]
null
null
null
train.py
VArdulov/learning-kis
2637f08d5e8027a22feff17064be45ea51f738e5
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 """ Learning Koopman Invariant Subspace (c) Naoya Takeishi, 2017. [email protected] """ import numpy as np np.random.seed(1234567890) from argparse import ArgumentParser from os import path import time from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner from losses import combined_loss from torch import device, save, manual_seed from torch.optim import SGD import matplotlib.pyplot as plt import seaborn as sns # -- Parse arguments t = time.time() parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)') parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment") parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model") parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for") parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into") parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no") parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space") parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl") parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting") parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag") parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space") parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by") parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate") parser.add_argument("--validation-data-path", "-v", type=str, default="") #ToDo: Implement parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set") if __name__ == "__main__": # grab the command line arguments cli_args = parser.parse_args() manual_seed(216) # find and load the training data data_path = cli_args.data_path print(f"Loading training data from {data_path}") data_train = np.load(data_path) if len(data_train.shape) == 1: data_train = data_train.reshape(-1, 1) print(f"Loaded a dataset with dimension: {data_train.shape}") validate = cli_args.validation_data_path != "" data_val = None if validate: data_path = cli_args.validation_data_path print(f"Loading validation data from {data_path}") data_val = np.load(data_path) # process the delay either set by the user or is set to one 10th of the data delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10) # based on the number of batches, delay, and size of the data compute the samples per batch samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches # construct the data preparer batch_iterator = TimeSeriesBatchMaker( y=data_train, batch_size=samples_per_batch, max_lag=delay ) if validate: val_batch_iterator = TimeSeriesBatchMaker( y=data_val, max_lag=delay ) # construct the end-to-end model lkis = KoopmanInvariantSubspaceLearner( observable_dim=data_train.shape[1], latent_dim=cli_args.state_space, intermediate_observable=cli_args.intermediate_observable, delay=delay ) if cli_args.gpu: device = device("cuda") # initialize the optimizer optimizer = 
SGD(lkis.parameters(), lr=cli_args.learning_rate) losses = [] val_losses = [] for epoch in range(cli_args.epochs): loss = 0 for b in range(cli_args.num_batches): optimizer.zero_grad() time_delayed_ys, y_true = next(batch_iterator) if cli_args.gpu: time_delayed_ys.to(device) y_true.to(device) g_pred, y_pred = lkis(time_delayed_ys) g_0 = g_pred[:-1] g_1 = g_pred[1:] batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1) batch_loss.backward() optimizer.step() loss += batch_loss.item() # display the epoch training loss print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}") losses.append(loss) if validate: y_time_delayed_val, y_true = next(val_batch_iterator) if cli_args.gpu: y_time_delayed_val.to(device) y_true.to(device) g_pred, y_pred = lkis(y_time_delayed_val) g_0 = g_pred[:-1] g_1 = g_pred[1:] batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1) val_loss = batch_loss.item() print(f"\tval-loss = {val_loss:.6f}") val_losses.append(val_loss) if cli_args.save_model: save(lkis, f"{cli_args.name}.torch.mdl") if cli_args.save_training_plot: sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss") if validate: sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss") plt.xlabel("Epochs") plt.ylabel("Combined Reconstruction and DMD Loss") plt.title(f"Training Loss for {cli_args.name}") plt.savefig(f"{cli_args.name}-training-loss.png")
38.574324
145
0.669294
0
0
0
0
0
0
0
0
1,781
0.311964
8813da3968ae4a879a3ffd1fca43f066e89df5ea
671
py
Python
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py
KenWoo/Algorithm
4012a2f0a099a502df1e5df2e39faa75fe6463e8
[ "Apache-2.0" ]
null
null
null
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py
KenWoo/Algorithm
4012a2f0a099a502df1e5df2e39faa75fe6463e8
[ "Apache-2.0" ]
null
null
null
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py
KenWoo/Algorithm
4012a2f0a099a502df1e5df2e39faa75fe6463e8
[ "Apache-2.0" ]
null
null
null
from typing import List


class Solution:
    def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
        arr.sort()
        res = []
        min_diff = arr[1] - arr[0]
        res.append([arr[0], arr[1]])
        for i in range(1, len(arr)-1):
            diff = arr[i+1]-arr[i]
            if diff < min_diff:
                min_diff = diff
                res.clear()
                res.append([arr[i], arr[i+1]])
            elif diff == min_diff:
                res.append([arr[i], arr[i+1]])
        return res


if __name__ == "__main__":
    s = Solution()
    result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
    print(result)
26.84
70
0.490313
508
0.757079
0
0
0
0
0
0
10
0.014903
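A worked check of the driver call in the record above, assuming the Solution class from that record is in scope: after sorting, [-14, -10, -4, 3, 8, 19, 23, 27] has adjacent gaps 4, 6, 7, 5, 11, 4, 4, so the minimum absolute difference is 4.

assert Solution().minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27]) == [
    [-14, -10], [19, 23], [23, 27]
]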
8814231575bbe6e4934834a1434e867f02c0e57d
2,125
py
Python
resources/physequations.py
VijayStroup/Physics_Problem_Solver_Basic
fc6944475ed8bcfe91bbd207734c3f9aee31e0fe
[ "MIT" ]
null
null
null
resources/physequations.py
VijayStroup/Physics_Problem_Solver_Basic
fc6944475ed8bcfe91bbd207734c3f9aee31e0fe
[ "MIT" ]
null
null
null
resources/physequations.py
VijayStroup/Physics_Problem_Solver_Basic
fc6944475ed8bcfe91bbd207734c3f9aee31e0fe
[ "MIT" ]
null
null
null
import math


def close(expected, actual, maxerror):
    '''checks to see if the actual number is within expected +- maxerror.'''
    low = expected - maxerror
    high = expected + maxerror
    if actual >= low and actual <= high:
        return True
    else:
        return False


def grav_potential_energy(mass, height, gravity=9.81):
    '''calculate potential energy given mass and height.
    Mass in kilograms and height in meters.'''
    gp_energy = mass * height * gravity
    return gp_energy


def kin_energy(mass, velocity):
    '''calculate kinetic energy given mass and velocity.
    Mass in kilograms and velocity in meters per second.'''
    k_energy = .5 * mass * velocity ** 2
    return k_energy


def work_energy(force, displacement, angle):
    '''calculate work energy given force, displacement, and angle.
    Force in newtons, displacement in meters, angle in degrees.'''
    anglerad = math.radians(angle)
    cos = math.cos(anglerad)
    w_energy = force * displacement * cos
    return w_energy


'''=============================================================================
Tests
============================================================================='''

if __name__ == '__main__':
    def check(funcname, args, expected, ans, maxerror):
        if not close(expected, ans, maxerror):
            print(f'{funcname}({args}) = {ans} should = {expected}')

    print(close(10, 11.1, 1))
    print(close(100, 100.001, .01))
    print(close(-10, -11.01, 1))
    print(close(84756, 84300.2, 500.5))

    #gravitational potential energy tests
    ans = grav_potential_energy(3.00, 7.00)
    check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.00000000000000000000000001)
    ans = grav_potential_energy(2.00, 5.00)
    check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)

    #kinetic energy tests
    ans = kin_energy(2, 6.55)
    check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
    ans = kin_energy(5.65, 10)
    check('kin_energy', '5.65, 10', 282.5, ans, 0.1)

    #work energy tests
    ans = work_energy(500, 10, 0)
    check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
    ans = work_energy(150, 50, 45)
    check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
32.19697
88
0.631059
0
0
0
0
0
0
0
0
913
0.429647
71467296157c3ad9afffaf380b92ae10d722c419
10,659
py
Python
mvpa2/tests/test_erdataset.py
andycon/PyMVPA
67f7ee68012e3a1128168c583d6c83303b7a2c27
[ "MIT" ]
null
null
null
mvpa2/tests/test_erdataset.py
andycon/PyMVPA
67f7ee68012e3a1128168c583d6c83303b7a2c27
[ "MIT" ]
null
null
null
mvpa2/tests/test_erdataset.py
andycon/PyMVPA
67f7ee68012e3a1128168c583d6c83303b7a2c27
[ "MIT" ]
null
null
null
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the PyMVPA package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## '''Tests for the event-related dataset''' from mvpa2.testing import * from mvpa2.datasets import dataset_wizard from mvpa2.mappers.flatten import FlattenMapper from mvpa2.mappers.boxcar import BoxcarMapper from mvpa2.mappers.fx import FxMapper from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \ extract_boxcar_event_samples from mvpa2.datasets.sources import load_example_fmri_dataset from mvpa2.mappers.zscore import zscore def test_erdataset(): # 3 chunks, 5 targets, blocks of 5 samples each nchunks = 3 ntargets = 5 blocklength = 5 nfeatures = 10 targets = np.tile(np.repeat(range(ntargets), blocklength), nchunks) chunks = np.repeat(np.arange(nchunks), ntargets * blocklength) samples = np.repeat( np.arange(nchunks * ntargets * blocklength), nfeatures).reshape(-1, nfeatures) ds = dataset_wizard(samples, targets=targets, chunks=chunks) # check if events are determined properly evs = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks) for ev in evs: assert_equal(ev['duration'], blocklength) assert_equal(ntargets * nchunks, len(evs)) for t in range(ntargets): assert_equal(len([ev for ev in evs if ev['targets'] == t]), nchunks) # now turn `ds` into an eventreleated dataset erds = eventrelated_dataset(ds, evs) # the only unprefixed sample attributes are assert_equal(sorted([a for a in ds.sa if not a.startswith('event')]), ['chunks', 'targets']) # samples as expected? 
assert_array_equal(erds.samples[0], np.repeat(np.arange(blocklength), nfeatures)) # that should also be the temporal feature offset assert_array_equal(erds.samples[0], erds.fa.event_offsetidx) assert_array_equal(erds.sa.event_onsetidx, np.arange(0,71,5)) # finally we should see two mappers assert_equal(len(erds.a.mapper), 2) assert_true(isinstance(erds.a.mapper[0], BoxcarMapper)) assert_true(isinstance(erds.a.mapper[1], FlattenMapper)) # check alternative event mapper # this one does temporal compression by averaging erds_compress = eventrelated_dataset( ds, evs, event_mapper=FxMapper('features', np.mean)) assert_equal(len(erds), len(erds_compress)) assert_array_equal(erds_compress.samples[:,0], np.arange(2,73,5)) # # now check the same dataset with event descretization tr = 2.5 ds.sa['time'] = np.arange(nchunks * ntargets * blocklength) * tr evs = [{'onset': 4.9, 'duration': 6.2}] # doesn't work without conversion assert_raises(ValueError, eventrelated_dataset, ds, evs) erds = eventrelated_dataset(ds, evs, time_attr='time') assert_equal(len(erds), 1) assert_array_equal(erds.samples[0], np.repeat(np.arange(1,5), nfeatures)) assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']]) assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']]) assert_array_almost_equal(erds.sa.orig_offset, [2.4]) assert_array_equal(erds.sa.time, [np.arange(2.5, 11, 2.5)]) # now with closest match erds = eventrelated_dataset(ds, evs, time_attr='time', match='closest') expected_nsamples = 3 assert_equal(len(erds), 1) assert_array_equal(erds.samples[0], np.repeat(np.arange(2,2+expected_nsamples), nfeatures)) assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']]) assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']]) assert_array_almost_equal(erds.sa.orig_offset, [-0.1]) assert_array_equal(erds.sa.time, [np.arange(5.0, 11, 2.5)]) # now test the way back results = np.arange(erds.nfeatures) assert_array_equal(erds.a.mapper.reverse1(results), results.reshape(expected_nsamples, nfeatures)) # what about multiple results? 
nresults = 5 results = dataset_wizard([results] * nresults) # and let's have an attribute to make it more difficult results.sa['myattr'] = np.arange(5) rds = erds.a.mapper.reverse(results) assert_array_equal(rds, results.samples.reshape(nresults * expected_nsamples, nfeatures)) assert_array_equal(rds.sa.myattr, np.repeat(results.sa.myattr, expected_nsamples)) evs = [dict(onset=12, duration=2), dict(onset=70, duration=3)] evds = extract_boxcar_event_samples(ds, evs) # it goes for the max of all durations assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures)) # overide duration evds = extract_boxcar_event_samples(ds, evs, event_duration=1) assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures)) assert_equal(np.unique(evds.samples[1]), 70) # overide onset evds = extract_boxcar_event_samples(ds, evs, event_offset=2) assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures)) assert_equal(np.unique(evds.samples[1,:10]), 72) # overide both evds = extract_boxcar_event_samples(ds, evs, event_offset=-2, event_duration=1) assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures)) assert_equal(np.unique(evds.samples[1]), 68) def test_hrf_modeling(): skip_if_no_external('nibabel') skip_if_no_external('nipy') # ATM relies on NiPy's GLM implementation ds = load_example_fmri_dataset('25mm', literal=True) # TODO: simulate short dataset with known properties and use it # for testing events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks) tr = ds.a.imghdr['pixdim'][4] for ev in events: for a in ('onset', 'duration'): ev[a] = ev[a] * tr evds = eventrelated_dataset(ds, events, time_attr='time_coords', condition_attr='targets', design_kwargs=dict(drift_model='blank'), glmfit_kwargs=dict(model='ols'), model='hrf') # same voxels assert_equal(ds.nfeatures, evds.nfeatures) assert_array_equal(ds.fa.voxel_indices, evds.fa.voxel_indices) # one sample for each condition, plus constant assert_equal(sorted(ds.sa['targets'].unique), sorted(evds.sa.targets)) assert_equal(evds.a.add_regs.sa.regressor_names[0], 'constant') # with centered data zscore(ds) evds_demean = eventrelated_dataset(ds, events, time_attr='time_coords', condition_attr='targets', design_kwargs=dict(drift_model='blank'), glmfit_kwargs=dict(model='ols'), model='hrf') # after demeaning the constant should consume a lot less assert(evds.a.add_regs[0].samples.mean() > evds_demean.a.add_regs[0].samples.mean()) # from eyeballing the sensitivity example -- would be better to test this on # the tutorial data assert(evds_demean[evds.sa.targets == 'shoe'].samples.max() \ > evds_demean[evds.sa.targets == 'bottle'].samples.max()) # HRF models assert('regressors' in evds.sa) assert('regressors' in evds.a.add_regs.sa) assert_equal(evds.sa.regressors.shape[1], len(ds)) # custom regressors evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords', condition_attr='targets', regr_attrs=['time_indices'], design_kwargs=dict(drift_model='blank'), glmfit_kwargs=dict(model='ols'), model='hrf') # verify that nothing screwed up time_coords assert_equal(ds.sa.time_coords[0], 0) assert_equal(len(evds_regrs), len(evds)) # one more output sample in .a.add_regs assert_equal(len(evds_regrs.a.add_regs) - 1, len(evds.a.add_regs)) # comes last before constant assert_equal('time_indices', evds_regrs.a.add_regs.sa.regressor_names[-2]) # order of main regressors is unchanged assert_array_equal(evds.sa.targets, evds_regrs.sa.targets) # custom regressors from external sources evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords', condition_attr='targets', 
regr_attrs=['time_coords'], design_kwargs=dict(drift_model='blank', add_regs=np.linspace(1, -1, len(ds))[None].T, add_reg_names=['negative_trend']), glmfit_kwargs=dict(model='ols'), model='hrf') assert_equal(len(evds_regrs), len(evds)) # But we got one more in additional regressors assert_equal(len(evds_regrs.a.add_regs) - 2, len(evds.a.add_regs)) # comes last before constant assert_array_equal(['negative_trend', 'time_coords', 'constant'], evds_regrs.a.add_regs.sa.regressor_names) # order is otherwise unchanged assert_array_equal(evds.sa.targets, evds_regrs.sa.targets) # HRF models with estimating per each chunk assert_equal(ds.sa.time_coords[0], 0) evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords', condition_attr=['targets', 'chunks'], regr_attrs=['time_indices'], design_kwargs=dict(drift_model='blank'), glmfit_kwargs=dict(model='ols'), model='hrf') assert_true('add_regs' in evds_regrs.a) assert_true('time_indices' in evds_regrs.a.add_regs.sa.regressor_names) assert_equal(len(ds.UC) * len(ds.UT), len(evds_regrs)) assert_equal(len(evds_regrs.UC) * len(evds_regrs.UT), len(evds_regrs)) from mvpa2.mappers.fx import mean_group_sample evds_regrs_meaned = mean_group_sample(['targets'])(evds_regrs) assert_array_equal(evds_regrs_meaned.T, evds.T) # targets should be the same #corr = np.corrcoef(np.vstack((evds.samples, evds_regrs_meaned))) #import pydb; pydb.debugger() #pass #i = 1
48.45
96
0.626888
0
0
0
0
0
0
0
0
2,519
0.236326
7148d1a57a15a29836e2ab0aae7b7bc5dc398f57
1,174
py
Python
userbot/plugins/delfp.py
aksr-aashish/FIREXUSERBOT
dff0b7bf028cb27779626ce523402346cc990402
[ "MIT" ]
null
null
null
userbot/plugins/delfp.py
aksr-aashish/FIREXUSERBOT
dff0b7bf028cb27779626ce523402346cc990402
[ "MIT" ]
1
2022-01-09T11:35:06.000Z
2022-01-09T11:35:06.000Z
userbot/plugins/delfp.py
aksr-aashish/FIREXUSERBOT
dff0b7bf028cb27779626ce523402346cc990402
[ "MIT" ]
null
null
null
from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto

from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd

CmdHelp("delfp").add_command("delpfp", None, "delete your current profile picture").add()


@borg.on(admin_cmd(pattern="delpfp ?(.*)"))
@borg.on(sudo_cmd(pattern="delpfp ?(.*)", allow_sudo=True))
async def remove_profilepic(delpfp):
    """For .delpfp command, delete your current profile picture in Telegram."""
    group = delpfp.text[8:]
    if group == "all":
        lim = 0
    elif group.isdigit():
        lim = int(group)
    else:
        lim = 1
    pfplist = await delpfp.client(
        GetUserPhotosRequest(user_id=delpfp.from_id, offset=0, max_id=0, limit=lim)
    )
    input_photos = [InputPhoto(
        id=sep.id,
        access_hash=sep.access_hash,
        file_reference=sep.file_reference,
    ) for sep in pfplist.photos]
    await delpfp.client(DeletePhotosRequest(id=input_photos))
    await edit_or_reply(
        delpfp, f"`Successfully deleted {len(input_photos)} profile picture(s).`"
    )
34.529412
86
0.67632
0
0
0
0
861
0.73339
757
0.644804
222
0.189097
7149245bb6b3dda015cca0a397d867fb3542c00d
1,308
py
Python
amlb/benchmarks/file.py
pplonski/automlbenchmark
f49ddfa2583643173296ed8ab45a8c14c62a6987
[ "MIT" ]
282
2018-09-19T09:45:46.000Z
2022-03-30T04:05:51.000Z
amlb/benchmarks/file.py
pplonski/automlbenchmark
f49ddfa2583643173296ed8ab45a8c14c62a6987
[ "MIT" ]
267
2018-11-02T11:43:11.000Z
2022-03-31T08:58:16.000Z
amlb/benchmarks/file.py
pplonski/automlbenchmark
f49ddfa2583643173296ed8ab45a8c14c62a6987
[ "MIT" ]
104
2018-10-17T19:32:36.000Z
2022-03-19T22:47:59.000Z
import logging
import os
from typing import List, Tuple, Optional

from amlb.utils import config_load, Namespace

log = logging.getLogger(__name__)


def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str:
    # 'name' should be either a full path to the benchmark,
    # or a filename (without extension) in the benchmark directory.
    if os.path.exists(name):
        return name

    for bd in benchmark_definition_dirs:
        bf = os.path.join(bd, f"{name}.yaml")
        if os.path.exists(bf):
            # We don't account for duplicate definitions (yet).
            return bf

    # should we support s3 and check for s3 path before raising error?
    raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")


def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
    """ Loads benchmark from a local file. """
    benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
    log.info("Loading benchmark definitions from %s.", benchmark_file)
    tasks = config_load(benchmark_file)
    benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
    return benchmark_name, benchmark_file, tasks
39.636364
119
0.727829
0
0
0
0
0
0
0
0
427
0.326453
714957e1bb0b1384b108ed8e7921b1c771c5effe
4,815
py
Python
pybuspro/devices/control.py
eyesoft/pybuspro
9a178117be2db40ef1399cc60afdc18e251682bc
[ "MIT" ]
2
2019-03-15T03:47:10.000Z
2019-10-30T15:34:09.000Z
pybuspro/devices/control.py
eyesoft/pybuspro
9a178117be2db40ef1399cc60afdc18e251682bc
[ "MIT" ]
null
null
null
pybuspro/devices/control.py
eyesoft/pybuspro
9a178117be2db40ef1399cc60afdc18e251682bc
[ "MIT" ]
4
2019-01-12T17:50:24.000Z
2020-01-12T16:56:24.000Z
from ..core.telegram import Telegram from ..helpers.enums import OperateCode class _Control: def __init__(self, buspro): self._buspro = buspro self.subnet_id = None self.device_id = None @staticmethod def build_telegram_from_control(control): if control is None: return None if type(control) == _SingleChannelControl: operate_code = OperateCode.SingleChannelControl payload = [control.channel_number, control.channel_level, control.running_time_minutes, control.running_time_seconds] elif type(control) == _SceneControl: operate_code = OperateCode.SceneControl payload = [control.area_number, control.scene_number] elif type(control) == _ReadStatusOfChannels: operate_code = OperateCode.ReadStatusOfChannels payload = [] elif type(control) == _GenericControl: operate_code = control.operate_code payload = control.payload elif type(control) == _UniversalSwitch: operate_code = OperateCode.UniversalSwitchControl payload = [control.switch_number, control.switch_status.value] elif type(control) == _ReadStatusOfUniversalSwitch: operate_code = OperateCode.ReadStatusOfUniversalSwitch payload = [control.switch_number] elif type(control) == _ReadSensorStatus: operate_code = OperateCode.ReadSensorStatus payload = [] elif type(control) == _ReadSensorsInOneStatus: operate_code = OperateCode.ReadSensorsInOneStatus payload = [] elif type(control) == _ReadFloorHeatingStatus: operate_code = OperateCode.ReadFloorHeatingStatus payload = [] elif type(control) == _ReadDryContactStatus: operate_code = OperateCode.ReadDryContactStatus payload = [1, control.switch_number] elif type(control) == _ControlFloorHeatingStatus: operate_code = OperateCode.ControlFloorHeatingStatus payload = [control.temperature_type, control.status, control.mode, control.normal_temperature, control.day_temperature, control.night_temperature, control.away_temperature] else: return None telegram = Telegram() telegram.target_address = (control.subnet_id, control.device_id) telegram.operate_code = operate_code telegram.payload = payload return telegram @property def telegram(self): return self.build_telegram_from_control(self) async def send(self): telegram = self.telegram # if telegram.target_address[1] == 100: # print("==== {}".format(str(telegram))) await self._buspro.network_interface.send_telegram(telegram) class _GenericControl(_Control): def __init__(self, buspro): super().__init__(buspro) self.payload = None self.operate_code = None class _SingleChannelControl(_Control): def __init__(self, buspro): super().__init__(buspro) self.channel_number = None self.channel_level = None self.running_time_minutes = None self.running_time_seconds = None class _SceneControl(_Control): def __init__(self, buspro): super().__init__(buspro) self.area_number = None self.scene_number = None class _ReadStatusOfChannels(_Control): def __init__(self, buspro): super().__init__(buspro) # no more properties class _UniversalSwitch(_Control): def __init__(self, buspro): super().__init__(buspro) self.switch_number = None self.switch_status = None class _ReadStatusOfUniversalSwitch(_Control): def __init__(self, buspro): super().__init__(buspro) self.switch_number = None class _ReadSensorStatus(_Control): def __init__(self, buspro): super().__init__(buspro) # no more properties class _ReadSensorsInOneStatus(_Control): def __init__(self, buspro): super().__init__(buspro) # no more properties class _ReadFloorHeatingStatus(_Control): def __init__(self, buspro): super().__init__(buspro) # no more properties class _ControlFloorHeatingStatus(_Control): def __init__(self, buspro): 
super().__init__(buspro) self.temperature_type = None self.status = None self.mode = None self.normal_temperature = None self.day_temperature = None self.night_temperature = None self.away_temperature = None class _ReadDryContactStatus(_Control): def __init__(self, buspro): super().__init__(buspro) self.switch_number = None
28.660714
106
0.655867
4,702
0.976532
0
0
2,420
0.502596
226
0.046937
163
0.033853
7149b8c5cf18fd7bdd1bfdc804b0918d755edaae
5,961
py
Python
appengine/chrome_infra_console_loadtest/main.py
eunchong/infra
ce3728559112bfb3e8b32137eada517aec6d22f9
[ "BSD-3-Clause" ]
null
null
null
appengine/chrome_infra_console_loadtest/main.py
eunchong/infra
ce3728559112bfb3e8b32137eada517aec6d22f9
[ "BSD-3-Clause" ]
null
null
null
appengine/chrome_infra_console_loadtest/main.py
eunchong/infra
ce3728559112bfb3e8b32137eada517aec6d22f9
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import endpoints import random import webapp2 from apiclient import discovery from google.appengine.ext import ndb from oauth2client.client import GoogleCredentials from protorpc import messages from protorpc import message_types from protorpc import remote from components import auth CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY" API_NAME = 'consoleapp' API_VERSION = 'v1' DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest' class FieldParamsModel(ndb.Model): field_key = ndb.StringProperty() values = ndb.StringProperty(repeated=True) class MetricModel(ndb.Model): name = ndb.StringProperty(default="") minimum = ndb.FloatProperty(default=0) maximum = ndb.FloatProperty(default=100) class ParamsModel(ndb.Model): time = ndb.FloatProperty(default=10) freq = ndb.FloatProperty(default=1) url = ndb.StringProperty() params = ndb.LocalStructuredProperty(FieldParamsModel, repeated=True) metrics = ndb.LocalStructuredProperty(MetricModel, repeated=True) class Field(messages.Message): key = messages.StringField(1) value = messages.StringField(2) class Point(messages.Message): time = messages.FloatField(1) value = messages.FloatField(2) class FieldParams(messages.Message): field_key = messages.StringField(1) values = messages.StringField(2, repeated=True) class Metric(messages.Message): name = messages.StringField(1) minimum = messages.FloatField(2) maximum = messages.FloatField(3) class Params(messages.Message): time = messages.FloatField(1) freq = messages.FloatField(2) url = messages.StringField(3) params = messages.MessageField(FieldParams, 4, repeated=True) metrics = messages.MessageField(Metric, 5, repeated=True) class TimeSeries(messages.Message): points = messages.MessageField(Point, 1, repeated=True) fields = messages.MessageField(Field, 2, repeated=True) metric = messages.StringField(3) class DataPacket(messages.Message): timeseries = messages.MessageField(TimeSeries, 1, repeated=True) @auth.endpoints_api(name='consoleapp', version='v1') class LoadTestApi(remote.Service): """A testing endpoint that receives timeseries data.""" @auth.endpoints_method(DataPacket, message_types.VoidMessage, name='timeseries.update') @auth.require(lambda: auth.is_group_member('metric-generators')) def timeseries_update(self, request): logging.debug('Datapacket length is %d', len(request.timeseries)) return message_types.VoidMessage() @auth.endpoints_api(name='ui', version='v1') class UIApi(remote.Service): """API for the loadtest configuration UI.""" @auth.endpoints_method(message_types.VoidMessage, Params, name='ui.get') @auth.require(lambda: auth.is_group_member('metric-generators')) def UI_get(self, _request): data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY) params = [FieldParams(field_key=field.field_key, values=field.values) for field in data.params] metrics = [Metric(name=metric.name, minimum=metric.minimum, maximum=metric.maximum) for metric in data.metrics] return Params(time=data.time, freq=data.freq, url=data.url, params=params, metrics=metrics) @auth.endpoints_method(Params, message_types.VoidMessage, name='ui.set') @auth.require(lambda: auth.is_group_member('metric-generators')) def UI_set(self, request): logging.debug('Got %s', request) data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY) data.time = request.time data.freq = request.freq data.url = request.url data.params = 
[FieldParamsModel(field_key=field.field_key, values=field.values) for field in request.params] data.metrics = [MetricModel(name=metric.name, minimum=metric.minimum, maximum=metric.maximum) for metric in request.metrics] data.put() return message_types.VoidMessage() def field_generator(dataparams, index, fields): if index == len(dataparams): return [fields] else: key = dataparams[index].field_key return sum((field_generator( dataparams, index+1, fields+[{'key': key, 'value': value}]) for value in dataparams[index].values), []) class CronHandler(webapp2.RequestHandler): def get(self): data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY) metric_ranges = {} for metric in data.metrics: metric_ranges[metric.name] = (metric.minimum,metric.maximum) datapacket = {'timeseries': []} logging.debug('There are %d metrics', len(metric_ranges)) fieldlist = field_generator(data.params, 0, []) for metric in metric_ranges: for fields in fieldlist: points = [] for x in xrange(0, int(data.time), int(data.freq)): points.append({'time': x, 'value': random.uniform(*metric_ranges[metric])}) timeseries = {'points': points, 'fields': fields, 'metric': metric} datapacket['timeseries'].append(timeseries) logging.info('Send data to %s', data.url) discovery_url = DISCOVERY_URL % data.url credentials = GoogleCredentials.get_application_default() service = discovery.build(API_NAME, API_VERSION, discoveryServiceUrl=discovery_url, credentials=credentials) _response = service.timeseries().update(body=datapacket).execute() backend_handlers = [ ('/cron', CronHandler) ] WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True) APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
33.301676
78
0.690824
4,719
0.791646
0
0
2,071
0.347425
0
0
620
0.104009
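A self-contained illustration of the field_generator() helper from the record above: it expands one parameter object per field into the cartesian product of {'key': ..., 'value': ...} dicts. SimpleNamespace stands in for the FieldParamsModel ndb entities here, purely for the sketch.

from types import SimpleNamespace


def field_generator(dataparams, index, fields):
    if index == len(dataparams):
        return [fields]
    else:
        key = dataparams[index].field_key
        return sum((field_generator(
            dataparams, index+1, fields+[{'key': key, 'value': value}])
            for value in dataparams[index].values), [])


params = [
    SimpleNamespace(field_key='master', values=['m1', 'm2']),
    SimpleNamespace(field_key='builder', values=['linux', 'win']),
]
for combo in field_generator(params, 0, []):
    print(combo)  # four combinations: each master paired with each builder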
7149cd13d14ac2cce8176e2e198709907cc8c456
9,523
py
Python
src/mitre/securingai/restapi/task_plugin/controller.py
usnistgov/dioptra
08a08e96c27787915bafc75a483431333e2c70ca
[ "CC-BY-4.0" ]
14
2021-06-17T15:16:12.000Z
2021-11-08T10:25:37.000Z
src/mitre/securingai/restapi/task_plugin/controller.py
usnistgov/dioptra
08a08e96c27787915bafc75a483431333e2c70ca
[ "CC-BY-4.0" ]
7
2021-09-20T20:20:26.000Z
2022-03-30T13:17:43.000Z
src/mitre/securingai/restapi/task_plugin/controller.py
usnistgov/dioptra
08a08e96c27787915bafc75a483431333e2c70ca
[ "CC-BY-4.0" ]
4
2021-06-29T16:52:42.000Z
2022-01-21T16:56:45.000Z
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional

import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger

from mitre.securingai.restapi.utils import as_api_parser

from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService

LOGGER: BoundLogger = structlog.stdlib.get_logger()

api: Namespace = Namespace(
    "TaskPlugin",
    description="Task plugin registry operations",
)


@api.route("/")
class TaskPluginResource(Resource):
    """Shows a list of all task plugins, and lets you POST to upload new ones."""

    @inject
    def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
        self._task_plugin_service = task_plugin_service
        super().__init__(*args, **kwargs)

    @responds(schema=TaskPluginSchema(many=True), api=api)
    def get(self) -> List[TaskPlugin]:
        """Gets a list of all registered task plugins."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="GET"
        )
        log.info("Request received")
        return self._task_plugin_service.get_all(
            bucket=current_app.config["AI_PLUGINS_BUCKET"], log=log
        )

    @api.expect(as_api_parser(api, TaskPluginUploadSchema))
    @accepts(TaskPluginUploadSchema, api=api)
    @responds(schema=TaskPluginSchema, api=api)
    def post(self) -> TaskPlugin:
        """Registers a new task plugin uploaded via the task plugin upload form."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="POST"
        )
        task_plugin_upload_form: TaskPluginUploadForm = TaskPluginUploadForm()

        log.info("Request received")

        if not task_plugin_upload_form.validate_on_submit():
            log.error("Form validation failed")
            raise TaskPluginUploadError

        log.info("Form validation successful")
        task_plugin_upload_form_data: TaskPluginUploadFormData = (
            self._task_plugin_service.extract_data_from_form(
                task_plugin_upload_form=task_plugin_upload_form, log=log
            )
        )
        return self._task_plugin_service.create(
            task_plugin_upload_form_data=task_plugin_upload_form_data,
            bucket=current_app.config["AI_PLUGINS_BUCKET"],
            log=log,
        )


@api.route("/securingai_builtins")
class TaskPluginBuiltinsCollectionResource(Resource):
    """Shows a list of all builtin task plugins."""

    @inject
    def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
        self._task_plugin_service = task_plugin_service
        super().__init__(*args, **kwargs)

    @responds(schema=TaskPluginSchema(many=True), api=api)
    def get(self) -> List[TaskPlugin]:
        """Gets a list of all available builtin task plugins."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()),
            resource="taskPluginBuiltinCollection",
            request_type="GET",
        )
        log.info("Request received")
        return self._task_plugin_service.get_all_in_collection(
            collection="securingai_builtins",
            bucket=current_app.config["AI_PLUGINS_BUCKET"],
            log=log,
        )


@api.route("/securingai_builtins/<string:taskPluginName>")
@api.param(
    "taskPluginName",
    "A unique string identifying a task plugin package within securingai_builtins "
    "collection.",
)
class TaskPluginBuiltinCollectionNameResource(Resource):
    """Shows a single builtin task plugin package."""

    @inject
    def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
        self._task_plugin_service = task_plugin_service
        super().__init__(*args, **kwargs)

    @responds(schema=TaskPluginSchema, api=api)
    def get(self, taskPluginName: str) -> TaskPlugin:
        """Gets a builtin task plugin by its unique name."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()),
            resource="taskPluginBuiltinCollectionName",
            request_type="GET",
        )
        log.info("Request received")
        task_plugin: Optional[
            TaskPlugin
        ] = self._task_plugin_service.get_by_name_in_collection(
            collection="securingai_builtins",
            task_plugin_name=taskPluginName,
            bucket=current_app.config["AI_PLUGINS_BUCKET"],
            log=log,
        )

        if task_plugin is None:
            log.error(
                "TaskPlugin not found",
                task_plugin_name=taskPluginName,
                collection="securingai_builtins",
            )
            raise TaskPluginDoesNotExistError

        return task_plugin


@api.route("/securingai_custom")
class TaskPluginCustomCollectionResource(Resource):
    """Shows a list of all custom task plugins."""

    @inject
    def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
        self._task_plugin_service = task_plugin_service
        super().__init__(*args, **kwargs)

    @responds(schema=TaskPluginSchema(many=True), api=api)
    def get(self) -> List[TaskPlugin]:
        """Gets a list of all registered custom task plugins."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()),
            resource="taskPluginCustomCollection",
            request_type="GET",
        )
        log.info("Request received")
        return self._task_plugin_service.get_all_in_collection(
            collection="securingai_custom",
            bucket=current_app.config["AI_PLUGINS_BUCKET"],
            log=log,
        )


@api.route("/securingai_custom/<string:taskPluginName>")
@api.param(
    "taskPluginName",
    "A unique string identifying a task plugin package within securingai_custom "
    "collection.",
)
class TaskPluginCustomCollectionNameResource(Resource):
    """Shows a single custom task plugin package and lets you delete it."""

    @inject
    def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
        self._task_plugin_service = task_plugin_service
        super().__init__(*args, **kwargs)

    @responds(schema=TaskPluginSchema, api=api)
    def get(self, taskPluginName: str) -> TaskPlugin:
        """Gets a custom task plugin by its unique name."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()),
            resource="taskPluginCustomCollectionName",
            request_type="GET",
        )
        log.info("Request received")
        task_plugin: Optional[
            TaskPlugin
        ] = self._task_plugin_service.get_by_name_in_collection(
            collection="securingai_custom",
            task_plugin_name=taskPluginName,
            bucket=current_app.config["AI_PLUGINS_BUCKET"],
            log=log,
        )

        if task_plugin is None:
            log.error(
                "TaskPlugin not found",
                task_plugin_name=taskPluginName,
                collection="securingai_custom",
            )
            raise TaskPluginDoesNotExistError

        return task_plugin

    def delete(self, taskPluginName: str) -> Response:
        """Deletes a custom task plugin by its unique name."""
        log: BoundLogger = LOGGER.new(
            request_id=str(uuid.uuid4()),
            resource="taskPluginCustomCollectionName",
            task_plugin_name=taskPluginName,
            request_type="DELETE",
        )
        log.info("Request received")
        task_plugins: List[TaskPlugin] = self._task_plugin_service.delete(
            collection="securingai_custom",
            task_plugin_name=taskPluginName,
            bucket=current_app.config["AI_PLUGINS_BUCKET"],
            log=log,
        )
        name: List[str] = [x.task_plugin_name for x in task_plugins]
        return jsonify(  # type: ignore
            dict(status="Success", collection="securingai_custom", taskPluginName=name)
        )
37.789683
88
0.676888
7,100
0.745563
0
0
7,576
0.795548
0
0
2,987
0.313662
714a5d7f1ebf03213e86c878b9d094ccb13ebf53
16,181
py
Python
dulwich/tests/test_lru_cache.py
mjmaenpaa/dulwich
d13a0375f4cc3099ff1c6edacda97f317c28f67a
[ "Apache-2.0" ]
null
null
null
dulwich/tests/test_lru_cache.py
mjmaenpaa/dulwich
d13a0375f4cc3099ff1c6edacda97f317c28f67a
[ "Apache-2.0" ]
null
null
null
dulwich/tests/test_lru_cache.py
mjmaenpaa/dulwich
d13a0375f4cc3099ff1c6edacda97f317c28f67a
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2006, 2008 Canonical Ltd # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # <http://www.gnu.org/licenses/> for a copy of the GNU General Public License # and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache # License, Version 2.0. # """Tests for the lru_cache module.""" from dulwich import ( lru_cache, ) from dulwich.tests import ( TestCase, ) class TestLRUCache(TestCase): """Test that LRU cache properly keeps track of entries.""" def test_cache_size(self): cache = lru_cache.LRUCache(max_cache=10) self.assertEqual(10, cache.cache_size()) cache = lru_cache.LRUCache(max_cache=256) self.assertEqual(256, cache.cache_size()) cache.resize(512) self.assertEqual(512, cache.cache_size()) def test_missing(self): cache = lru_cache.LRUCache(max_cache=10) self.assertFalse('foo' in cache) self.assertRaises(KeyError, cache.__getitem__, 'foo') cache['foo'] = 'bar' self.assertEqual('bar', cache['foo']) self.assertTrue('foo' in cache) self.assertFalse('bar' in cache) def test_map_None(self): # Make sure that we can properly map None as a key. cache = lru_cache.LRUCache(max_cache=10) self.assertFalse(None in cache) cache[None] = 1 self.assertEqual(1, cache[None]) cache[None] = 2 self.assertEqual(2, cache[None]) # Test the various code paths of __getitem__, to make sure that we can # handle when None is the key for the LRU and the MRU cache[1] = 3 cache[None] = 1 cache[None] cache[1] cache[None] self.assertEqual([None, 1], [n.key for n in cache._walk_lru()]) def test_add__null_key(self): cache = lru_cache.LRUCache(max_cache=10) self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1) def test_overflow(self): """Adding extra entries will pop out old ones.""" cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1) cache['foo'] = 'bar' # With a max cache of 1, adding 'baz' should pop out 'foo' cache['baz'] = 'biz' self.assertFalse('foo' in cache) self.assertTrue('baz' in cache) self.assertEqual('biz', cache['baz']) def test_by_usage(self): """Accessing entries bumps them up in priority.""" cache = lru_cache.LRUCache(max_cache=2) cache['baz'] = 'biz' cache['foo'] = 'bar' self.assertEqual('biz', cache['baz']) # This must kick out 'foo' because it was the last accessed cache['nub'] = 'in' self.assertFalse('foo' in cache) def test_cleanup(self): """Test that we can use a cleanup function.""" cleanup_called = [] def cleanup_func(key, val): cleanup_called.append((key, val)) cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2) cache.add('baz', '1', cleanup=cleanup_func) cache.add('foo', '2', cleanup=cleanup_func) cache.add('biz', '3', cleanup=cleanup_func) self.assertEqual([('baz', '1')], cleanup_called) # 'foo' is now most recent, so final cleanup will call it last cache['foo'] cache.clear() self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')], cleanup_called) def test_cleanup_on_replace(self): """Replacing an object should cleanup the 
old value.""" cleanup_called = [] def cleanup_func(key, val): cleanup_called.append((key, val)) cache = lru_cache.LRUCache(max_cache=2) cache.add(1, 10, cleanup=cleanup_func) cache.add(2, 20, cleanup=cleanup_func) cache.add(2, 25, cleanup=cleanup_func) self.assertEqual([(2, 20)], cleanup_called) self.assertEqual(25, cache[2]) # Even __setitem__ should make sure cleanup() is called cache[2] = 26 self.assertEqual([(2, 20), (2, 25)], cleanup_called) def test_len(self): cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10) cache[1] = 10 cache[2] = 20 cache[3] = 30 cache[4] = 40 self.assertEqual(4, len(cache)) cache[5] = 50 cache[6] = 60 cache[7] = 70 cache[8] = 80 self.assertEqual(8, len(cache)) cache[1] = 15 # replacement self.assertEqual(8, len(cache)) cache[9] = 90 cache[10] = 100 cache[11] = 110 # We hit the max self.assertEqual(10, len(cache)) self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3], [n.key for n in cache._walk_lru()]) def test_cleanup_shrinks_to_after_clean_count(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3) cache.add(1, 10) cache.add(2, 20) cache.add(3, 25) cache.add(4, 30) cache.add(5, 35) self.assertEqual(5, len(cache)) # This will bump us over the max, which causes us to shrink down to # after_cleanup_cache size cache.add(6, 40) self.assertEqual(3, len(cache)) def test_after_cleanup_larger_than_max(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10) self.assertEqual(5, cache._after_cleanup_count) def test_after_cleanup_none(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None) # By default _after_cleanup_size is 80% of the normal size self.assertEqual(4, cache._after_cleanup_count) def test_cleanup_2(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2) # Add these in order cache.add(1, 10) cache.add(2, 20) cache.add(3, 25) cache.add(4, 30) cache.add(5, 35) self.assertEqual(5, len(cache)) # Force a compaction cache.cleanup() self.assertEqual(2, len(cache)) def test_preserve_last_access_order(self): cache = lru_cache.LRUCache(max_cache=5) # Add these in order cache.add(1, 10) cache.add(2, 20) cache.add(3, 25) cache.add(4, 30) cache.add(5, 35) self.assertEqual([5, 4, 3, 2, 1], [n.key for n in cache._walk_lru()]) # Now access some randomly cache[2] cache[5] cache[3] cache[2] self.assertEqual([2, 3, 5, 4, 1], [n.key for n in cache._walk_lru()]) def test_get(self): cache = lru_cache.LRUCache(max_cache=5) cache.add(1, 10) cache.add(2, 20) self.assertEqual(20, cache.get(2)) self.assertEqual(None, cache.get(3)) obj = object() self.assertTrue(obj is cache.get(3, obj)) self.assertEqual([2, 1], [n.key for n in cache._walk_lru()]) self.assertEqual(10, cache.get(1)) self.assertEqual([1, 2], [n.key for n in cache._walk_lru()]) def test_keys(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5) cache[1] = 2 cache[2] = 3 cache[3] = 4 self.assertEqual([1, 2, 3], sorted(cache.keys())) cache[4] = 5 cache[5] = 6 cache[6] = 7 self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys())) def test_resize_smaller(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4) cache[1] = 2 cache[2] = 3 cache[3] = 4 cache[4] = 5 cache[5] = 6 self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys())) cache[6] = 7 self.assertEqual([3, 4, 5, 6], sorted(cache.keys())) # Now resize to something smaller, which triggers a cleanup cache.resize(max_cache=3, after_cleanup_count=2) self.assertEqual([5, 6], sorted(cache.keys())) # Adding something will use the new size cache[7] = 8 self.assertEqual([5, 6, 7], 
sorted(cache.keys())) cache[8] = 9 self.assertEqual([7, 8], sorted(cache.keys())) def test_resize_larger(self): cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4) cache[1] = 2 cache[2] = 3 cache[3] = 4 cache[4] = 5 cache[5] = 6 self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys())) cache[6] = 7 self.assertEqual([3, 4, 5, 6], sorted(cache.keys())) cache.resize(max_cache=8, after_cleanup_count=6) self.assertEqual([3, 4, 5, 6], sorted(cache.keys())) cache[7] = 8 cache[8] = 9 cache[9] = 10 cache[10] = 11 self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys())) cache[11] = 12 # triggers cleanup back to new after_cleanup_count self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys())) class TestLRUSizeCache(TestCase): def test_basic_init(self): cache = lru_cache.LRUSizeCache() self.assertEqual(2048, cache._max_cache) self.assertEqual(int(cache._max_size*0.8), cache._after_cleanup_size) self.assertEqual(0, cache._value_size) def test_add__null_key(self): cache = lru_cache.LRUSizeCache() self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1) def test_add_tracks_size(self): cache = lru_cache.LRUSizeCache() self.assertEqual(0, cache._value_size) cache.add('my key', 'my value text') self.assertEqual(13, cache._value_size) def test_remove_tracks_size(self): cache = lru_cache.LRUSizeCache() self.assertEqual(0, cache._value_size) cache.add('my key', 'my value text') self.assertEqual(13, cache._value_size) node = cache._cache['my key'] cache._remove_node(node) self.assertEqual(0, cache._value_size) def test_no_add_over_size(self): """Adding a large value may not be cached at all.""" cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5) self.assertEqual(0, cache._value_size) self.assertEqual({}, cache.items()) cache.add('test', 'key') self.assertEqual(3, cache._value_size) self.assertEqual({'test': 'key'}, cache.items()) cache.add('test2', 'key that is too big') self.assertEqual(3, cache._value_size) self.assertEqual({'test':'key'}, cache.items()) # If we would add a key, only to cleanup and remove all cached entries, # then obviously that value should not be stored cache.add('test3', 'bigkey') self.assertEqual(3, cache._value_size) self.assertEqual({'test':'key'}, cache.items()) cache.add('test4', 'bikey') self.assertEqual(3, cache._value_size) self.assertEqual({'test':'key'}, cache.items()) def test_no_add_over_size_cleanup(self): """If a large value is not cached, we will call cleanup right away.""" cleanup_calls = [] def cleanup(key, value): cleanup_calls.append((key, value)) cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5) self.assertEqual(0, cache._value_size) self.assertEqual({}, cache.items()) cache.add('test', 'key that is too big', cleanup=cleanup) # key was not added self.assertEqual(0, cache._value_size) self.assertEqual({}, cache.items()) # and cleanup was called self.assertEqual([('test', 'key that is too big')], cleanup_calls) def test_adding_clears_cache_based_on_size(self): """The cache is cleared in LRU order until small enough""" cache = lru_cache.LRUSizeCache(max_size=20) cache.add('key1', 'value') # 5 chars cache.add('key2', 'value2') # 6 chars cache.add('key3', 'value23') # 7 chars self.assertEqual(5+6+7, cache._value_size) cache['key2'] # reference key2 so it gets a newer reference time cache.add('key4', 'value234') # 8 chars, over limit # We have to remove 2 keys to get back under limit self.assertEqual(6+8, cache._value_size) self.assertEqual({'key2':'value2', 'key4':'value234'}, cache.items()) def 
test_adding_clears_to_after_cleanup_size(self): cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10) cache.add('key1', 'value') # 5 chars cache.add('key2', 'value2') # 6 chars cache.add('key3', 'value23') # 7 chars self.assertEqual(5+6+7, cache._value_size) cache['key2'] # reference key2 so it gets a newer reference time cache.add('key4', 'value234') # 8 chars, over limit # We have to remove 3 keys to get back under limit self.assertEqual(8, cache._value_size) self.assertEqual({'key4':'value234'}, cache.items()) def test_custom_sizes(self): def size_of_list(lst): return sum(len(x) for x in lst) cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10, compute_size=size_of_list) cache.add('key1', ['val', 'ue']) # 5 chars cache.add('key2', ['val', 'ue2']) # 6 chars cache.add('key3', ['val', 'ue23']) # 7 chars self.assertEqual(5+6+7, cache._value_size) cache['key2'] # reference key2 so it gets a newer reference time cache.add('key4', ['value', '234']) # 8 chars, over limit # We have to remove 3 keys to get back under limit self.assertEqual(8, cache._value_size) self.assertEqual({'key4':['value', '234']}, cache.items()) def test_cleanup(self): cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10) # Add these in order cache.add('key1', 'value') # 5 chars cache.add('key2', 'value2') # 6 chars cache.add('key3', 'value23') # 7 chars self.assertEqual(5+6+7, cache._value_size) cache.cleanup() # Only the most recent fits after cleaning up self.assertEqual(7, cache._value_size) def test_keys(self): cache = lru_cache.LRUSizeCache(max_size=10) cache[1] = 'a' cache[2] = 'b' cache[3] = 'cdef' self.assertEqual([1, 2, 3], sorted(cache.keys())) def test_resize_smaller(self): cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9) cache[1] = 'abc' cache[2] = 'def' cache[3] = 'ghi' cache[4] = 'jkl' # Triggers a cleanup self.assertEqual([2, 3, 4], sorted(cache.keys())) # Resize should also cleanup again cache.resize(max_size=6, after_cleanup_size=4) self.assertEqual([4], sorted(cache.keys())) # Adding should use the new max size cache[5] = 'mno' self.assertEqual([4, 5], sorted(cache.keys())) cache[6] = 'pqr' self.assertEqual([6], sorted(cache.keys())) def test_resize_larger(self): cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9) cache[1] = 'abc' cache[2] = 'def' cache[3] = 'ghi' cache[4] = 'jkl' # Triggers a cleanup self.assertEqual([2, 3, 4], sorted(cache.keys())) cache.resize(max_size=15, after_cleanup_size=12) self.assertEqual([2, 3, 4], sorted(cache.keys())) cache[5] = 'mno' cache[6] = 'pqr' self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys())) cache[7] = 'stu' self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
35.798673
79
0.603115
15,148
0.93616
0
0
0
0
0
0
3,772
0.233113
714b8d767c11fadd1e5da33bbf5b7d19a7d70405
382
py
Python
py/2016/5B.py
pedrotari7/advent_of_code
98d5bc8d903435624a019a5702f5421d7b4ef8c8
[ "MIT" ]
null
null
null
py/2016/5B.py
pedrotari7/advent_of_code
98d5bc8d903435624a019a5702f5421d7b4ef8c8
[ "MIT" ]
null
null
null
py/2016/5B.py
pedrotari7/advent_of_code
98d5bc8d903435624a019a5702f5421d7b4ef8c8
[ "MIT" ]
null
null
null
import md5

(i,count) = (0,0)

password = ['']*8

while 1:
    key = 'reyedfim' + str(i)
    md = md5.new(key).hexdigest()
    if md[:5] == '00000':
        index = int(md[5],16)
        if index < len(password) and password[index]=='':
            password[index] = md[6]
            count += 1
            if count == 8:
                break
    i+=1

print ''.join(password)
17.363636
58
0.465969
0
0
0
0
0
0
0
0
23
0.060209
714cfc19c240490817e3657df9cb9287844afbb6
16,391
py
Python
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py
lsica-scopely/mgear4
28ed5d66370a9516da05d93d447bfc15f4c0c9f4
[ "MIT" ]
null
null
null
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py
lsica-scopely/mgear4
28ed5d66370a9516da05d93d447bfc15f4c0c9f4
[ "MIT" ]
null
null
null
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py
lsica-scopely/mgear4
28ed5d66370a9516da05d93d447bfc15f4c0c9f4
[ "MIT" ]
null
null
null
import pymel.core as pm import ast from pymel.core import datatypes from mgear.shifter import component from mgear.core import node, applyop, vector from mgear.core import attribute, transform, primitive class Component(component.Main): """Shifter component Class""" # ===================================================== # OBJECTS # ===================================================== def addObjects(self): """Add all the objects needed to create the component.""" # joint Description Names jd_names = ast.literal_eval( self.settings["jointNamesDescription_custom"] ) jdn_ball = jd_names[0] self.up_axis = pm.upAxis(q=True, axis=True) self.div_count = len(self.guide.apos) - 5 plane = [self.guide.apos[0], self.guide.apos[-4], self.guide.apos[-3]] self.normal = self.getNormalFromPos(plane) self.binormal = self.getBiNormalFromPos(plane) # Heel --------------------------------------------- # bank pivot t = transform.getTransformLookingAt( self.guide.pos["heel"], self.guide.apos[-4], self.normal, "xz", self.negate, ) t = transform.setMatrixPosition(t, self.guide.pos["inpivot"]) self.in_npo = primitive.addTransform( self.root, self.getName("in_npo"), t ) self.in_piv = primitive.addTransform( self.in_npo, self.getName("in_piv"), t ) t = transform.setMatrixPosition(t, self.guide.pos["outpivot"]) self.out_piv = primitive.addTransform( self.in_piv, self.getName("out_piv"), t ) # heel t = transform.getTransformLookingAt( self.guide.pos["heel"], self.guide.apos[-4], self.normal, "xz", self.negate, ) self.heel_loc = primitive.addTransform( self.out_piv, self.getName("heel_loc"), t ) attribute.setRotOrder(self.heel_loc, "YZX") self.heel_ctl = self.addCtl( self.heel_loc, "heel_ctl", t, self.color_ik, "sphere", w=self.size * 0.1, tp=self.parentCtlTag, ) attribute.setKeyableAttributes(self.heel_ctl, self.r_params) # Tip ---------------------------------------------- if self.up_axis == "y": v = datatypes.Vector( self.guide.apos[-5].x, self.guide.pos["heel"].y, self.guide.apos[-5].z, ) else: v = datatypes.Vector( self.guide.apos[-5].x, self.guide.apos[-5].y, self.guide.pos["heel"].z, ) t = transform.setMatrixPosition(t, v) self.tip_ctl = self.addCtl( self.heel_ctl, "tip_ctl", t, self.color_ik, "circle", w=self.size, tp=self.heel_ctl, ) attribute.setKeyableAttributes(self.tip_ctl, self.r_params) # Roll --------------------------------------------- if self.settings["useRollCtl"]: t = transform.getTransformLookingAt( self.guide.pos["heel"], self.guide.apos[-4], self.normal, "xz", self.negate, ) t = transform.setMatrixPosition(t, self.guide.pos["root"]) self.roll_np = primitive.addTransform( self.root, self.getName("roll_npo"), t ) self.roll_ctl = self.addCtl( self.roll_np, "roll_ctl", t, self.color_ik, "cylinder", w=self.size * 0.5, h=self.size * 0.5, ro=datatypes.Vector(3.1415 * 0.5, 0, 0), tp=self.tip_ctl, ) attribute.setKeyableAttributes(self.roll_ctl, ["rx", "rz"]) # Backward Controlers ------------------------------ bk_pos = self.guide.apos[1:-3] bk_pos.reverse() parent = self.tip_ctl self.bk_ctl = [] self.bk_loc = [] self.previousTag = self.tip_ctl for i, pos in enumerate(bk_pos): if i == 0: t = transform.getTransform(self.heel_ctl) t = transform.setMatrixPosition(t, pos) else: direction = bk_pos[i - 1] t = transform.getTransformLookingAt( pos, direction, self.normal, "xz", self.negate ) bk_loc = primitive.addTransform( parent, self.getName("bk%s_loc" % i), t ) bk_ctl = self.addCtl( bk_loc, "bk%s_ctl" % i, t, self.color_ik, "sphere", w=self.size * 0.15, tp=self.previousTag, ) 
attribute.setKeyableAttributes(bk_ctl, self.r_params) self.previousTag = bk_ctl self.bk_loc.append(bk_loc) self.bk_ctl.append(bk_ctl) parent = bk_ctl # FK Reference ------------------------------------ self.fk_ref = primitive.addTransformFromPos( self.bk_ctl[-1], self.getName("fk_ref"), self.guide.apos[0] ) self.fk_npo = primitive.addTransform( self.fk_ref, self.getName("fk0_npo"), transform.getTransform(self.bk_ctl[-1]), ) # Forward Controlers ------------------------------ self.fk_ctl = [] self.fk_loc = [] parent = self.fk_npo self.previousTag = self.tip_ctl for i, bk_ctl in enumerate(reversed(self.bk_ctl[1:])): if i == len(self.bk_ctl) - 2: t = transform.getTransform(self.tip_ctl) v = transform.getTranslation(bk_ctl) t = transform.setMatrixPosition(t, v) else: t = transform.getTransform(bk_ctl) dist = vector.getDistance( self.guide.apos[i + 1], self.guide.apos[i + 2] ) fk_loc = primitive.addTransform( parent, self.getName("fk%s_loc" % i), t ) po_vec = datatypes.Vector(dist * 0.5 * self.n_factor, 0, 0) fk_ctl = self.addCtl( fk_loc, "fk%s_ctl" % i, t, self.color_fk, "cube", w=dist, h=self.size * 0.5, d=self.size * 0.5, po=po_vec, tp=self.previousTag, ) self.previousTag = fk_ctl attribute.setKeyableAttributes(fk_ctl) if i: name = jdn_ball + str(i) else: name = jdn_ball self.jnt_pos.append([fk_ctl, name]) parent = fk_ctl self.fk_ctl.append(fk_ctl) self.fk_loc.append(fk_loc) # ===================================================== # ATTRIBUTES # ===================================================== def addAttributes(self): """Create the anim and setupr rig attributes for the component""" # Anim ------------------------------------------- # Roll Angles if not self.settings["useRollCtl"]: self.roll_att = self.addAnimParam( "roll", "Roll", "double", 0, -180, 180 ) self.bank_att = self.addAnimParam( "bank", "Bank", "double", 0, -180, 180 ) self.angles_att = [ self.addAnimParam("angle_%s" % i, "Angle %s" % i, "double", -20) for i in range(self.div_count) ] # Setup ------------------------------------------ self.blend_att = self.addSetupParam( "blend", "Fk/Ik Blend", "double", 1, 0, 1 ) # ===================================================== # OPERATORS # ===================================================== def addOperators(self): """Create operators and set the relations for the component rig Apply operators, constraints, expressions to the hierarchy. In order to keep the code clean and easier to debug, we shouldn't create any new object in this method. 
""" # Visibilities ------------------------------------- try: # ik if self.settings["useRollCtl"]: for shp in self.roll_ctl.getShapes(): pm.connectAttr(self.blend_att, shp.attr("visibility")) for bk_ctl in self.bk_ctl: for shp in bk_ctl.getShapes(): pm.connectAttr(self.blend_att, shp.attr("visibility")) for shp in self.heel_ctl.getShapes(): pm.connectAttr(self.blend_att, shp.attr("visibility")) for shp in self.tip_ctl.getShapes(): pm.connectAttr(self.blend_att, shp.attr("visibility")) except RuntimeError: pm.displayInfo("Visibility already connect") # Roll / Bank -------------------------------------- if self.settings["useRollCtl"]: # Using the controler self.roll_att = self.roll_ctl.attr("rz") self.bank_att = self.roll_ctl.attr("rx") clamp_node = node.createClampNode( [self.roll_att, self.bank_att, self.bank_att], [0, -180, 0], [180, 0, 180], ) inAdd_nod = node.createAddNode( clamp_node.outputB, pm.getAttr(self.in_piv.attr("rx")) * self.n_factor, ) pm.connectAttr(clamp_node.outputR, self.heel_loc.attr("rz")) pm.connectAttr(clamp_node.outputG, self.out_piv.attr("rx")) pm.connectAttr(inAdd_nod.output, self.in_piv.attr("rx")) # Reverse Controler offset ------------------------- angle_outputs = node.createAddNodeMulti(self.angles_att) for i, bk_loc in enumerate(reversed(self.bk_loc)): if i == 0: # First inpu = self.roll_att min_input = self.angles_att[i] elif i == len(self.angles_att): # Last sub_nod = node.createSubNode( self.roll_att, angle_outputs[i - 1] ) inpu = sub_nod.output min_input = -360 else: # Others sub_nod = node.createSubNode( self.roll_att, angle_outputs[i - 1] ) inpu = sub_nod.output min_input = self.angles_att[i] clamp_node = node.createClampNode(inpu, min_input, 0) add_node = node.createAddNode( clamp_node.outputR, bk_loc.getAttr("rz") ) pm.connectAttr(add_node.output, bk_loc.attr("rz")) # Reverse compensation ----------------------------- for i, fk_loc in enumerate(self.fk_loc): bk_ctl = self.bk_ctl[-i - 1] bk_loc = self.bk_loc[-i - 1] fk_ctl = self.fk_ctl[i] # Inverse Rotorder o_node = applyop.gear_inverseRotorder_op(bk_ctl, fk_ctl) pm.connectAttr(o_node.output, bk_loc.attr("ro")) pm.connectAttr(fk_ctl.attr("ro"), fk_loc.attr("ro")) attribute.lockAttribute(bk_ctl, "ro") # Compensate the backward rotation # ik addx_node = node.createAddNode( bk_ctl.attr("rx"), bk_loc.attr("rx") ) addy_node = node.createAddNode( bk_ctl.attr("ry"), bk_loc.attr("ry") ) addz_node = node.createAddNode( bk_ctl.attr("rz"), bk_loc.attr("rz") ) addz_node = node.createAddNode( addz_node.output, -bk_loc.getAttr("rz") - fk_loc.getAttr("rz") ) neg_node = node.createMulNode( [addx_node.output, addy_node.output, addz_node.output], [-1, -1, -1], ) add_node = node.createAddNode( neg_node.outputY.get() * -1, neg_node.outputY ) ik_outputs = [neg_node.outputX, add_node.output, neg_node.outputZ] # fk fk_outputs = [0, 0, fk_loc.getAttr("rz")] # blend blend_node = node.createBlendNode( ik_outputs, fk_outputs, self.blend_att ) pm.connectAttr(blend_node.output, fk_loc.attr("rotate")) return # ===================================================== # CONNECTOR # ===================================================== def setRelation(self): """Set the relation beetween object from guide to rig""" self.relatives["root"] = self.fk_ctl[0] self.relatives["heel"] = self.fk_ctl[0] self.relatives["inpivot"] = self.fk_ctl[0] self.relatives["outpivot"] = self.fk_ctl[0] self.controlRelatives["root"] = self.fk_ctl[0] self.controlRelatives["heel"] = self.fk_ctl[0] self.controlRelatives["inpivot"] = self.fk_ctl[0] 
self.controlRelatives["outpivot"] = self.fk_ctl[0] self.jointRelatives["root"] = 0 self.jointRelatives["heel"] = 0 self.jointRelatives["inpivot"] = 0 self.jointRelatives["outpivot"] = 0 for i in range(self.div_count): self.relatives["%s_loc" % i] = self.fk_ctl[i] self.jointRelatives["%s_loc" % i] = i if self.div_count > 0: self.relatives["%s_loc" % self.div_count] = self.fk_ctl[-1] self.jointRelatives["%s_loc" % self.div_count] = self.div_count - 1 def addConnection(self): """Add more connection definition to the set""" self.connections["EPIC_leg_01"] = self.connect_leg_2jnt_01 self.connections["leg_2jnt_01"] = self.connect_leg_2jnt_01 self.connections["leg_ms_2jnt_01"] = self.connect_leg_ms_2jnt_01 self.connections["leg_3jnt_01"] = self.connect_leg_3jnt_01 def connect_leg_2jnt_01(self): """Connector for leg 2jnt""" # If the parent component hasn't been generated we skip the connection if self.parent_comp is None: return pm.connectAttr(self.parent_comp.blend_att, self.blend_att) pm.parent(self.root, self.parent_comp.ik_ctl) pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1]) pm.parentConstraint( self.parent_comp.tws2_rot, self.fk_ref, maintainOffset=True ) return def connect_leg_ms_2jnt_01(self): """Connector for leg ms 2jnt""" # If the parent component hasn't been generated we skip the connection if self.parent_comp is None: return pm.connectAttr(self.parent_comp.blend_att, self.blend_att) pm.parent(self.root, self.parent_comp.ik_ctl) pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1]) pm.parentConstraint( self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True ) cns = pm.scaleConstraint( self.parent_comp.fk_ref, self.parent_comp.ik_ref, self.fk_ref, wal=True, ) bc_node = pm.createNode("blendColors") pm.connectAttr( bc_node.outputB, cns + ".%sW0" % self.parent_comp.fk_ref ) pm.connectAttr( bc_node.outputR, cns + ".%sW1" % self.parent_comp.ik_ref ) pm.connectAttr(self.parent_comp.blend_att, bc_node.blender) return def connect_leg_3jnt_01(self): """Connector for leg 3jnt""" # If the parent component hasn't been generated we skip the connection if self.parent_comp is None: return pm.connectAttr(self.parent_comp.blend_att, self.blend_att) pm.parent(self.root, self.parent_comp.ik_ctl) pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1]) pm.parent(self.parent_comp.ik2b_ikCtl_ref, self.bk_ctl[-1]) pm.parentConstraint( self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True ) return
33.865702
79
0.505887
16,182
0.987249
0
0
0
0
0
0
2,893
0.176499
714e6f1bdf4058bf187b53f8c773baa127319b6d
546
py
Python
streams/blog/migrations/0012_auto_20200928_1212.py
Engerrs/ckan.org
a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
[ "BSD-3-Clause" ]
1
2022-03-18T03:20:00.000Z
2022-03-18T03:20:00.000Z
streams/blog/migrations/0012_auto_20200928_1212.py
Engerrs/ckan.org
a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
[ "BSD-3-Clause" ]
26
2021-07-07T08:42:42.000Z
2022-03-29T14:34:59.000Z
streams/blog/migrations/0012_auto_20200928_1212.py
Engerrs/ckan.org
a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
[ "BSD-3-Clause" ]
3
2021-07-07T22:11:03.000Z
2021-09-15T18:19:10.000Z
# Generated by Django 3.1.1 on 2020-09-28 12:12

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0011_blogpostpage_featured'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='blogpostpage',
            name='date',
        ),
        migrations.AddField(
            model_name='blogpostpage',
            name='created',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now),
        ),
    ]
22.75
82
0.600733
437
0.800366
0
0
0
0
0
0
124
0.227106
714e74c6035390e31e82cb8cc61f9783ca761b5f
58,939
py
Python
opac/webapp/main/views.py
rafaelpezzuto/opac
9b54202350e262a27cb9cb756a892185b288df24
[ "BSD-2-Clause" ]
null
null
null
opac/webapp/main/views.py
rafaelpezzuto/opac
9b54202350e262a27cb9cb756a892185b288df24
[ "BSD-2-Clause" ]
null
null
null
opac/webapp/main/views.py
rafaelpezzuto/opac
9b54202350e262a27cb9cb756a892185b288df24
[ "BSD-2-Clause" ]
null
null
null
# coding: utf-8 import logging import requests import mimetypes from io import BytesIO from urllib.parse import urlparse from datetime import datetime, timedelta from collections import OrderedDict from flask_babelex import gettext as _ from flask import ( render_template, abort, current_app, request, session, redirect, jsonify, url_for, Response, send_from_directory, g, make_response, ) from werkzeug.contrib.atom import AtomFeed from urllib.parse import urljoin from legendarium.formatter import descriptive_short_format from . import main from webapp import babel from webapp import cache from webapp import controllers from webapp.choices import STUDY_AREAS from webapp.utils import utils from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs from webapp import forms from webapp.config.lang_names import display_original_lang_name from opac_schema.v1.models import Journal, Issue, Article, Collection from lxml import etree from packtools import HTMLGenerator logger = logging.getLogger(__name__) JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ") ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ") ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ") IAHX_LANGS = dict( p='pt', e='es', i='en', ) def url_external(endpoint, **kwargs): url = url_for(endpoint, **kwargs) return urljoin(request.url_root, url) class RetryableError(Exception): """Erro recuperável sem que seja necessário modificar o estado dos dados na parte cliente, e.g., timeouts, erros advindos de particionamento de rede etc. """ class NonRetryableError(Exception): """Erro do qual não pode ser recuperado sem modificar o estado dos dados na parte cliente, e.g., recurso solicitado não exite, URI inválida etc. """ def fetch_data(url: str, timeout: float = 2) -> bytes: try: response = requests.get(url, timeout=timeout) except (requests.ConnectionError, requests.Timeout) as exc: raise RetryableError(exc) from exc except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc: raise NonRetryableError(exc) from exc else: try: response.raise_for_status() except requests.HTTPError as exc: if 400 <= exc.response.status_code < 500: raise NonRetryableError(exc) from exc elif 500 <= exc.response.status_code < 600: raise RetryableError(exc) from exc else: raise return response.content @main.before_app_request def add_collection_to_g(): if not hasattr(g, 'collection'): try: collection = controllers.get_current_collection() setattr(g, 'collection', collection) except Exception: # discutir o que fazer aqui setattr(g, 'collection', {}) @main.after_request def add_header(response): response.headers['x-content-type-options'] = 'nosniff' return response @main.after_request def add_language_code(response): language = session.get('lang', get_locale()) response.set_cookie('language', language) return response @main.before_app_request def add_forms_to_g(): setattr(g, 'email_share', forms.EmailShareForm()) setattr(g, 'email_contact', forms.ContactForm()) setattr(g, 'error', forms.ErrorForm()) @main.before_app_request def add_scielo_org_config_to_g(): language = session.get('lang', get_locale()) scielo_org_links = { key: url[language] for key, url in current_app.config.get('SCIELO_ORG_URIS', {}).items() } setattr(g, 'scielo_org', scielo_org_links) @babel.localeselector def get_locale(): langs = current_app.config.get('LANGUAGES') lang_from_headers = request.accept_languages.best_match(list(langs.keys())) if 'lang' not in list(session.keys()): session['lang'] = 
lang_from_headers if not lang_from_headers and not session['lang']: # Caso não seja possível detectar o idioma e não tenhamos a chave lang # no seção, fixamos o idioma padrão. session['lang'] = current_app.config.get('BABEL_DEFAULT_LOCALE') return session['lang'] @main.route('/set_locale/<string:lang_code>/') def set_locale(lang_code): langs = current_app.config.get('LANGUAGES') if lang_code not in list(langs.keys()): abort(400, _('Código de idioma inválido')) referrer = request.referrer hash = request.args.get('hash') if hash: referrer += "#" + hash # salvar o lang code na sessão session['lang'] = lang_code return redirect(referrer) def get_lang_from_session(): """ Tenta retornar o idioma da seção, caso não consiga retorna BABEL_DEFAULT_LOCALE. """ try: return session['lang'] except KeyError: return current_app.config.get('BABEL_DEFAULT_LOCALE') @main.route('/') @cache.cached(key_prefix=cache_key_with_lang) def index(): language = session.get('lang', get_locale()) news = controllers.get_latest_news_by_lang(language) tweets = controllers.get_collection_tweets() press_releases = controllers.get_press_releases({'language': language}) urls = { 'downloads': '{0}/w/accesses?collection={1}'.format( current_app.config['METRICS_URL'], current_app.config['OPAC_COLLECTION']), 'references': '{0}/w/publication/size?collection={1}'.format( current_app.config['METRICS_URL'], current_app.config['OPAC_COLLECTION']), 'other': '{0}/?collection={1}'.format( current_app.config['METRICS_URL'], current_app.config['OPAC_COLLECTION']) } if ( g.collection is not None and isinstance(g.collection, Collection) and g.collection.metrics is not None and current_app.config['USE_HOME_METRICS'] ): g.collection.metrics.total_journal = Journal.objects.filter( is_public=True, current_status="current" ).count() g.collection.metrics.total_article = Article.objects.filter( is_public=True ).count() context = { 'news': news, 'urls': urls, 'tweets': tweets, 'press_releases': press_releases, } return render_template("collection/index.html", **context) # ##################################Collection################################### @main.route('/journals/alpha') @cache.cached(key_prefix=cache_key_with_lang) def collection_list(): allowed_filters = ["current", "no-current", ""] query_filter = request.args.get("status", "") if not query_filter in allowed_filters: query_filter = "" journals_list = [ controllers.get_journal_json_data(journal) for journal in controllers.get_journals(query_filter=query_filter) ] return render_template("collection/list_journal.html", **{'journals_list': journals_list, 'query_filter': query_filter}) @main.route("/journals/thematic") @cache.cached(key_prefix=cache_key_with_lang) def collection_list_thematic(): allowed_query_filters = ["current", "no-current", ""] allowed_thematic_filters = ["areas", "wos", "publisher"] thematic_table = { "areas": "study_areas", "wos": "subject_categories", "publisher": "publisher_name", } query_filter = request.args.get("status", "") title_query = request.args.get("query", "") thematic_filter = request.args.get("filter", "areas") if not query_filter in allowed_query_filters: query_filter = "" if not thematic_filter in allowed_thematic_filters: thematic_filter = "areas" lang = get_lang_from_session()[:2].lower() objects = controllers.get_journals_grouped_by( thematic_table[thematic_filter], title_query, query_filter=query_filter, lang=lang, ) return render_template( "collection/list_thematic.html", **{"objects": objects, "query_filter": query_filter, "filter": 
thematic_filter} ) @main.route('/journals/feed/') @cache.cached(key_prefix=cache_key_with_lang) def collection_list_feed(): language = session.get('lang', get_locale()) collection = controllers.get_current_collection() title = 'SciELO - %s - %s' % (collection.name, _('Últimos periódicos inseridos na coleção')) subtitle = _('10 últimos periódicos inseridos na coleção %s' % collection.name) feed = AtomFeed(title, subtitle=subtitle, feed_url=request.url, url=request.url_root) journals = controllers.get_journals_paginated( title_query='', page=1, order_by='-created', per_page=10) if not journals.items: feed.add('Nenhum periódico encontrado', url=request.url, updated=datetime.now()) for journal in journals.items: issues = controllers.get_issues_by_jid(journal.jid, is_public=True) last_issue = issues[0] if issues else None articles = [] if last_issue: articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True) result_dict = OrderedDict() for article in articles: section = article.get_section_by_lang(language[:2]) result_dict.setdefault(section, []) result_dict[section].append(article) context = { 'journal': journal, 'articles': result_dict, 'language': language, 'last_issue': last_issue } feed.add(journal.title, render_template("collection/list_feed_content.html", **context), content_type='html', author=journal.publisher_name, url=url_external('main.journal_detail', url_seg=journal.url_segment), updated=journal.updated, published=journal.created) return feed.get_response() @main.route("/about/", methods=['GET']) @main.route('/about/<string:slug_name>', methods=['GET']) @cache.cached(key_prefix=cache_key_with_lang_with_qs) def about_collection(slug_name=None): language = session.get('lang', get_locale()) context = {} page = None if slug_name: # caso seja uma página page = controllers.get_page_by_slug_name(slug_name, language) if not page: abort(404, _('Página não encontrada')) context['page'] = page else: # caso não seja uma página é uma lista pages = controllers.get_pages_by_lang(language) context['pages'] = pages return render_template("collection/about.html", **context) # ###################################Journal##################################### @main.route('/scielo.php/') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def router_legacy(): script_php = request.args.get('script', None) pid = request.args.get('pid', None) tlng = request.args.get('tlng', None) allowed_scripts = [ 'sci_serial', 'sci_issuetoc', 'sci_arttext', 'sci_abstract', 'sci_issues', 'sci_pdf' ] if (script_php is not None) and (script_php in allowed_scripts) and not pid: # se tem pelo menos um param: pid ou script_php abort(400, _(u'Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)) elif script_php and pid: if script_php == 'sci_serial': # pid = issn journal = controllers.get_journal_by_issn(pid) if not journal: abort(404, _('Periódico não encontrado')) if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) return redirect(url_for('main.journal_detail', url_seg=journal.url_segment), code=301) elif script_php == 'sci_issuetoc': issue = controllers.get_issue_by_pid(pid) if not issue: abort(404, _('Número não encontrado')) if not issue.is_public: abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason)) if not issue.journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason)) if issue.url_segment and "ahead" in issue.url_segment: return redirect( url_for('main.aop_toc', url_seg=url_seg), code=301) return redirect( url_for( 
"main.issue_toc", url_seg=issue.journal.url_segment, url_seg_issue=issue.url_segment), 301 ) elif script_php == 'sci_arttext' or script_php == 'sci_abstract': article = controllers.get_article_by_pid_v2(pid) if not article: abort(404, _('Artigo não encontrado')) # 'abstract' or None (not False, porque False converterá a string 'False') part = (script_php == 'sci_abstract' and 'abstract') or None if tlng not in article.languages: tlng = article.original_language return redirect(url_for('main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article.aid, part=part, lang=tlng), code=301) elif script_php == 'sci_issues': journal = controllers.get_journal_by_issn(pid) if not journal: abort(404, _('Periódico não encontrado')) if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) return redirect(url_for('main.issue_grid', url_seg=journal.url_segment), 301) elif script_php == 'sci_pdf': # accesso ao pdf do artigo: article = controllers.get_article_by_pid_v2(pid) if not article: abort(404, _('Artigo não encontrado')) return redirect( url_for( 'main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article.aid, format='pdf', ), code=301 ) else: abort(400, _(u'Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)) else: return redirect('/') @main.route('/<string:journal_seg>') @main.route('/journal/<string:journal_seg>') def journal_detail_legacy_url(journal_seg): return redirect(url_for('main.journal_detail', url_seg=journal_seg), code=301) @main.route('/j/<string:url_seg>/') @cache.cached(key_prefix=cache_key_with_lang) def journal_detail(url_seg): journal = controllers.get_journal_by_url_seg(url_seg) if not journal: abort(404, _('Periódico não encontrado')) if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) utils.fix_journal_last_issue(journal) # todo: ajustar para que seja só noticias relacionadas ao periódico language = session.get('lang', get_locale()) news = controllers.get_latest_news_by_lang(language) # Press releases press_releases = controllers.get_press_releases({ 'journal': journal, 'language': language}) # Lista de seções # Mantendo sempre o idioma inglês para as seções na página incial do periódico if journal.last_issue and journal.current_status == "current": sections = [section for section in journal.last_issue.sections if section.language == 'en'] recent_articles = controllers.get_recent_articles_of_issue(journal.last_issue.iid, is_public=True) else: sections = [] recent_articles = [] latest_issue = journal.last_issue if latest_issue: latest_issue_legend = descriptive_short_format( title=journal.title, short_title=journal.short_title, pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number, suppl=latest_issue.suppl_text, language=language[:2].lower()) else: latest_issue_legend = '' journal_metrics = controllers.get_journal_metrics(journal) context = { 'journal': journal, 'press_releases': press_releases, 'recent_articles': recent_articles, 'journal_study_areas': [ STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas ], # o primiero item da lista é o último número. 
# condicional para verificar se issues contém itens 'last_issue': latest_issue, 'latest_issue_legend': latest_issue_legend, 'sections': sections if sections else None, 'news': news, 'journal_metrics': journal_metrics } return render_template("journal/detail.html", **context) @main.route('/journal/<string:url_seg>/feed/') @cache.cached(key_prefix=cache_key_with_lang) def journal_feed(url_seg): journal = controllers.get_journal_by_url_seg(url_seg) if not journal: abort(404, _('Periódico não encontrado')) if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) issues = controllers.get_issues_by_jid(journal.jid, is_public=True) last_issue = issues[0] if issues else None articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True) feed = AtomFeed(journal.title, feed_url=request.url, url=request.url_root, subtitle=utils.get_label_issue(last_issue)) feed_language = session.get('lang', get_locale()) feed_language = feed_language[:2].lower() for article in articles: # ######### TODO: Revisar ######### article_lang = feed_language if feed_language not in article.languages: article_lang = article.original_language feed.add(article.title or _('Artigo sem título'), render_template("issue/feed_content.html", article=article), content_type='html', id=article.doi or article.pid, author=article.authors, url=url_external('main.article_detail_v3', url_seg=journal.url_segment, article_pid_v3=article.aid, lang=article_lang), updated=journal.updated, published=journal.created) return feed.get_response() @main.route("/journal/<string:url_seg>/about/", methods=['GET']) @cache.cached(key_prefix=cache_key_with_lang) def about_journal(url_seg): language = session.get('lang', get_locale()) journal = controllers.get_journal_by_url_seg(url_seg) if not journal: abort(404, _('Periódico não encontrado')) if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) latest_issue = utils.fix_journal_last_issue(journal) if latest_issue: latest_issue_legend = descriptive_short_format( title=journal.title, short_title=journal.short_title, pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number, suppl=latest_issue.suppl_text, language=language[:2].lower()) else: latest_issue_legend = None page = controllers.get_page_by_journal_acron_lang(journal.acronym, language) context = { 'journal': journal, 'latest_issue_legend': latest_issue_legend, 'last_issue': latest_issue, 'journal_study_areas': [ STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas ], } if page: context['content'] = page.content if page.updated_at: context['page_updated_at'] = page.updated_at return render_template("journal/about.html", **context) @main.route("/journals/search/alpha/ajax/", methods=['GET', ]) @cache.cached(key_prefix=cache_key_with_lang_with_qs) def journals_search_alpha_ajax(): if not request.is_xhr: abort(400, _('Requisição inválida. Deve ser por ajax')) query = request.args.get('query', '', type=str) query_filter = request.args.get('query_filter', '', type=str) page = request.args.get('page', 1, type=int) lang = get_lang_from_session()[:2].lower() response_data = controllers.get_alpha_list_from_paginated_journals( title_query=query, query_filter=query_filter, page=page, lang=lang) return jsonify(response_data) @main.route("/journals/search/group/by/filter/ajax/", methods=['GET']) @cache.cached(key_prefix=cache_key_with_lang_with_qs) def journals_search_by_theme_ajax(): if not request.is_xhr: abort(400, _('Requisição inválida. 
Deve ser por ajax')) query = request.args.get('query', '', type=str) query_filter = request.args.get('query_filter', '', type=str) filter = request.args.get('filter', 'areas', type=str) lang = get_lang_from_session()[:2].lower() if filter == 'areas': objects = controllers.get_journals_grouped_by('study_areas', query, query_filter=query_filter, lang=lang) elif filter == 'wos': objects = controllers.get_journals_grouped_by('subject_categories', query, query_filter=query_filter, lang=lang) elif filter == 'publisher': objects = controllers.get_journals_grouped_by('publisher_name', query, query_filter=query_filter, lang=lang) else: return jsonify({ 'error': 401, 'message': _('Parámetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".') }) return jsonify(objects) @main.route("/journals/download/<string:list_type>/<string:extension>/", methods=['GET', ]) @cache.cached(key_prefix=cache_key_with_lang_with_qs) def download_journal_list(list_type, extension): if extension.lower() not in ['csv', 'xls']: abort(401, _('Parámetro "extension" é inválido, deve ser "csv" ou "xls".')) elif list_type.lower() not in ['alpha', 'areas', 'wos', 'publisher']: abort(401, _('Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".')) else: if extension.lower() == 'xls': mimetype = 'application/vnd.ms-excel' else: mimetype = 'text/csv' query = request.args.get('query', '', type=str) data = controllers.get_journal_generator_for_csv(list_type=list_type, title_query=query, extension=extension.lower()) timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') filename = 'journals_%s_%s.%s' % (list_type, timestamp, extension) response = Response(data, mimetype=mimetype) response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename return response @main.route("/<string:url_seg>/contact", methods=['POST']) def contact(url_seg): if not request.is_xhr: abort(403, _('Requisição inválida, deve ser ajax.')) if utils.is_recaptcha_valid(request): form = forms.ContactForm(request.form) journal = controllers.get_journal_by_url_seg(url_seg) if not journal.enable_contact: abort(403, _('Periódico não permite envio de email.')) recipients = journal.editor_email if form.validate(): sent, message = controllers.send_email_contact(recipients, form.data['name'], form.data['your_email'], form.data['message']) return jsonify({'sent': sent, 'message': str(message), 'fields': [key for key in form.data.keys()]}) else: return jsonify({'sent': False, 'message': form.errors, 'fields': [key for key in form.data.keys()]}) else: abort(400, _('Requisição inválida, captcha inválido.')) @main.route("/form_contact/<string:url_seg>/", methods=['GET']) def form_contact(url_seg): journal = controllers.get_journal_by_url_seg(url_seg) if not journal: abort(404, _('Periódico não encontrado')) context = { 'journal': journal } return render_template("journal/includes/contact_form.html", **context) # ###################################Issue####################################### @main.route('/grid/<string:url_seg>/') def issue_grid_legacy(url_seg): return redirect(url_for('main.issue_grid', url_seg=url_seg), 301) @main.route('/j/<string:url_seg>/grid') @cache.cached(key_prefix=cache_key_with_lang) def issue_grid(url_seg): journal = controllers.get_journal_by_url_seg(url_seg) if not journal: abort(404, _('Periódico não encontrado')) if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) # idioma da sessão language = session.get('lang', get_locale()) # A ordenação padrão da função 
``get_issues_by_jid``: "-year", "-volume", "-order" issues_data = controllers.get_issues_for_grid_by_jid(journal.id, is_public=True) latest_issue = issues_data['last_issue'] if latest_issue: latest_issue_legend = descriptive_short_format( title=journal.title, short_title=journal.short_title, pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number, suppl=latest_issue.suppl_text, language=language[:2].lower()) else: latest_issue_legend = None context = { 'journal': journal, 'last_issue': issues_data['last_issue'], 'latest_issue_legend': latest_issue_legend, 'volume_issue': issues_data['volume_issue'], 'ahead': issues_data['ahead'], 'result_dict': issues_data['ordered_for_grid'], 'journal_study_areas': [ STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas ], } return render_template("issue/grid.html", **context) @main.route('/toc/<string:url_seg>/<string:url_seg_issue>/') def issue_toc_legacy(url_seg, url_seg_issue): if url_seg_issue and "ahead" in url_seg_issue: return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301) return redirect( url_for('main.issue_toc', url_seg=url_seg, url_seg_issue=url_seg_issue), code=301) @main.route('/j/<string:url_seg>/i/<string:url_seg_issue>/') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def issue_toc(url_seg, url_seg_issue): section_filter = None goto = request.args.get("goto", None, type=str) if goto not in ("previous", "next"): goto = None if goto in (None, "next") and "ahead" in url_seg_issue: # redireciona para `aop_toc` return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301) # idioma da sessão language = session.get('lang', get_locale()) if current_app.config["FILTER_SECTION_ENABLE"]: # seção dos documentos, se selecionada section_filter = request.args.get('section', '', type=str).upper() # obtém o issue issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue) if not issue: abort(404, _('Número não encontrado')) if not issue.is_public: abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason)) # obtém o journal journal = issue.journal if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) # completa url_segment do last_issue utils.fix_journal_last_issue(journal) # goto_next_or_previous_issue (redireciona) goto_url = goto_next_or_previous_issue( issue, request.args.get('goto', None, type=str)) if goto_url: return redirect(goto_url, code=301) # obtém os documentos articles = controllers.get_articles_by_iid(issue.iid, is_public=True) if articles: # obtém TODAS as seções dos documentos deste sumário sections = sorted({a.section.upper() for a in articles if a.section}) else: # obtém as seções dos documentos deste sumário sections = [] if current_app.config["FILTER_SECTION_ENABLE"] and section_filter != '': # obtém somente os documentos da seção selecionada articles = [a for a in articles if a.section.upper() == section_filter] # obtém PDF e TEXT de cada documento has_math_content = False for article in articles: article_text_languages = [doc['lang'] for doc in article.htmls] article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs] setattr(article, "article_text_languages", article_text_languages) setattr(article, "article_pdf_languages", article_pdf_languages) if 'mml:' in article.title: has_math_content = True # obtém a legenda bibliográfica issue_bibliographic_strip = descriptive_short_format( title=journal.title, short_title=journal.short_title, pubdate=str(issue.year), volume=issue.volume, number=issue.number, 
suppl=issue.suppl_text, language=language[:2].lower()) context = { 'this_page_url': url_for( 'main.issue_toc', url_seg=url_seg, url_seg_issue=url_seg_issue), 'has_math_content': has_math_content, 'journal': journal, 'issue': issue, 'issue_bibliographic_strip': issue_bibliographic_strip, 'articles': articles, 'sections': sections, 'section_filter': section_filter, 'journal_study_areas': [ STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas ], 'last_issue': journal.last_issue } return render_template("issue/toc.html", **context) def goto_next_or_previous_issue(current_issue, goto_param): if goto_param not in ["next", "previous"]: return None all_issues = list( controllers.get_issues_by_jid(current_issue.journal.id, is_public=True)) if goto_param == "next": selected_issue = utils.get_next_issue(all_issues, current_issue) elif goto_param == "previous": selected_issue = utils.get_prev_issue(all_issues, current_issue) if selected_issue in (None, current_issue): # nao precisa redirecionar return None try: url_seg_issue = selected_issue.url_segment except AttributeError: return None else: return url_for('main.issue_toc', url_seg=selected_issue.journal.url_segment, url_seg_issue=url_seg_issue) def get_next_or_previous_issue(current_issue, goto_param): if goto_param not in ["next", "previous"]: return current_issue all_issues = list( controllers.get_issues_by_jid(current_issue.journal.id, is_public=True)) if goto_param == "next": return utils.get_next_issue(all_issues, current_issue) return utils.get_prev_issue(all_issues, current_issue) @main.route('/j/<string:url_seg>/aop') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def aop_toc(url_seg): section_filter = request.args.get('section', '', type=str).upper() aop_issues = controllers.get_aop_issues(url_seg) or [] if not aop_issues: abort(404, _('Artigos ahead of print não encontrados')) goto = request.args.get("goto", None, type=str) if goto == "previous": url = goto_next_or_previous_issue(aop_issues[-1], goto) if url: redirect(url, code=301) journal = aop_issues[0].journal if not journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason)) utils.fix_journal_last_issue(journal) articles = [] for aop_issue in aop_issues: _articles = controllers.get_articles_by_iid( aop_issue.iid, is_public=True) if _articles: articles.extend(_articles) if not articles: abort(404, _('Artigos ahead of print não encontrados')) sections = sorted({a.section.upper() for a in articles if a.section}) if section_filter != '': articles = [a for a in articles if a.section.upper() == section_filter] for article in articles: article_text_languages = [doc['lang'] for doc in article.htmls] article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs] setattr(article, "article_text_languages", article_text_languages) setattr(article, "article_pdf_languages", article_pdf_languages) context = { 'this_page_url': url_for("main.aop_toc", url_seg=url_seg), 'journal': journal, 'issue': aop_issues[0], 'issue_bibliographic_strip': "ahead of print", 'articles': articles, 'sections': sections, 'section_filter': section_filter, 'journal_study_areas': [ STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas ], # o primeiro item da lista é o último número. 
'last_issue': journal.last_issue } return render_template("issue/toc.html", **context) @main.route('/feed/<string:url_seg>/<string:url_seg_issue>/') @cache.cached(key_prefix=cache_key_with_lang) def issue_feed(url_seg, url_seg_issue): issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue) if not issue: abort(404, _('Número não encontrado')) if not issue.is_public: abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason)) if not issue.journal.is_public: abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason)) journal = issue.journal articles = controllers.get_articles_by_iid(issue.iid, is_public=True) feed = AtomFeed(journal.title or "", feed_url=request.url, url=request.url_root, subtitle=utils.get_label_issue(issue)) feed_language = session.get('lang', get_locale()) for article in articles: # ######### TODO: Revisar ######### article_lang = feed_language if feed_language not in article.languages: article_lang = article.original_language feed.add(article.title or 'Unknow title', render_template("issue/feed_content.html", article=article), content_type='html', author=article.authors, id=article.doi or article.pid, url=url_external('main.article_detail_v3', url_seg=journal.url_segment, article_pid_v3=article.aid, lang=article_lang), updated=journal.updated, published=journal.created) return feed.get_response() # ##################################Article###################################### @main.route('/article/<regex("S\d{4}-\d{3}[0-9xX][0-2][0-9]{3}\d{4}\d{5}"):pid>/') @cache.cached(key_prefix=cache_key_with_lang) def article_detail_pid(pid): article = controllers.get_article_by_pid(pid) if not article: article = controllers.get_article_by_oap_pid(pid) if not article: abort(404, _('Artigo não encontrado')) return redirect(url_for('main.article_detail_v3', url_seg=article.journal.acronym, article_pid_v3=article.aid)) def render_html_from_xml(article, lang, gs_abstract=False): logger.debug("Get XML: %s", article.xml) if current_app.config["SSM_XML_URL_REWRITE"]: result = fetch_data(use_ssm_url(article.xml)) else: result = fetch_data(article.xml) xml = etree.parse(BytesIO(result)) generator = HTMLGenerator.parse( xml, valid_only=False, gs_abstract=gs_abstract, output_style="website") return generator.generate(lang), generator.languages def render_html_from_html(article, lang): html_url = [html for html in article.htmls if html['lang'] == lang] try: html_url = html_url[0]['url'] except IndexError: raise ValueError('Artigo não encontrado') from None result = fetch_data(use_ssm_url(html_url)) html = result.decode('utf8') text_languages = [html['lang'] for html in article.htmls] return html, text_languages def render_html_abstract(article, lang): abstract_text = '' for abstract in article.abstracts: if abstract['language'] == lang: abstract_text = abstract["text"] break return abstract_text, article.abstract_languages def render_html(article, lang, gs_abstract=False): if article.xml: return render_html_from_xml(article, lang, gs_abstract) elif article.htmls: if gs_abstract: return render_html_abstract(article, lang) return render_html_from_html(article, lang) else: # TODO: Corrigir os teste que esperam ter o atributo ``htmls`` # O ideal seria levantar um ValueError. return '', [] # TODO: Remover assim que o valor Article.xml estiver consistente na base de # dados def use_ssm_url(url): """Normaliza a string `url` de acordo com os valores das diretivas de configuração OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT. 
A normalização busca obter uma URL absoluta em função de uma relativa, ou uma absoluta em função de uma absoluta, mas com as partes *scheme* e *authority* trocadas pelas definidas nas diretivas citadas anteriormente. Este código deve ser removido assim que o valor de Article.xml estiver consistente, i.e., todos os registros possuirem apenas URLs absolutas. """ if url.startswith("http"): parsed_url = urlparse(url) return current_app.config["SSM_BASE_URI"] + parsed_url.path else: return current_app.config["SSM_BASE_URI"] + url @main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/') @main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>/') @main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/') @main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>/') @cache.cached(key_prefix=cache_key_with_lang) def article_detail(url_seg, url_seg_issue, url_seg_article, lang_code=''): issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue) if not issue: abort(404, _('Issue não encontrado')) article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article) if article is None: article = controllers.get_article_by_aop_url_segs( issue.journal, url_seg_issue, url_seg_article ) if article is None: abort(404, _('Artigo não encontrado')) req_params = { "url_seg": article.journal.acronym, "article_pid_v3": article.aid, } if lang_code: req_params["lang"] = lang_code return redirect(url_for('main.article_detail_v3', **req_params)) @main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/') @main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/<string:part>/') @cache.cached(key_prefix=cache_key_with_lang) def article_detail_v3(url_seg, article_pid_v3, part=None): qs_lang = request.args.get('lang', type=str) or None qs_goto = request.args.get('goto', type=str) or None qs_stop = request.args.get('stop', type=str) or None qs_format = request.args.get('format', 'html', type=str) gs_abstract = (part == "abstract") if part and not gs_abstract: abort(404, _("Não existe '{}'. 
No seu lugar use '{}'" ).format(part, 'abstract')) try: qs_lang, article = controllers.get_article( article_pid_v3, url_seg, qs_lang, gs_abstract, qs_goto) if qs_goto: return redirect( url_for( 'main.article_detail_v3', url_seg=url_seg, article_pid_v3=article.aid, part=part, format=qs_format, lang=qs_lang, stop=getattr(article, 'stop', None), ), code=301 ) except (controllers.PreviousOrNextArticleNotFoundError) as e: if gs_abstract: abort(404, _('Resumo inexistente')) abort(404, _('Artigo inexistente')) except (controllers.ArticleNotFoundError, controllers.ArticleJournalNotFoundError): abort(404, _('Artigo não encontrado')) except controllers.ArticleLangNotFoundError: return redirect( url_for( 'main.article_detail_v3', url_seg=url_seg, article_pid_v3=article_pid_v3, format=qs_format, ), code=301 ) except controllers.ArticleAbstractNotFoundError: abort(404, _('Recurso não encontrado')) except controllers.ArticleIsNotPublishedError as e: abort(404, "{}{}".format(ARTICLE_UNPUBLISH, e)) except controllers.IssueIsNotPublishedError as e: abort(404, "{}{}".format(ISSUE_UNPUBLISH, e)) except controllers.JournalIsNotPublishedError as e: abort(404, "{}{}".format(JOURNAL_UNPUBLISH, e)) except ValueError as e: abort(404, str(e)) def _handle_html(): citation_pdf_url = None for pdf_data in article.pdfs: if pdf_data.get("lang") == qs_lang: citation_pdf_url = url_for( 'main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article_pid_v3, lang=qs_lang, format="pdf", ) break website = request.url if website: parsed_url = urlparse(request.url) if current_app.config["FORCE_USE_HTTPS_GOOGLE_TAGS"]: website = "{}://{}".format('https', parsed_url.netloc) else: website = "{}://{}".format(parsed_url.scheme, parsed_url.netloc) if citation_pdf_url: citation_pdf_url = "{}{}".format(website, citation_pdf_url) try: html, text_languages = render_html(article, qs_lang, gs_abstract) except (ValueError, NonRetryableError): abort(404, _('HTML do Artigo não encontrado ou indisponível')) except RetryableError: abort(500, _('Erro inesperado')) text_versions = sorted( [ ( lang, display_original_lang_name(lang), url_for( 'main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article_pid_v3, lang=lang ) ) for lang in text_languages ] ) citation_xml_url = "{}{}".format( website, url_for( 'main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article_pid_v3, format="xml", lang=article.original_language, ) ) context = { 'next_article': qs_stop != 'next', 'previous_article': qs_stop != 'previous', 'article': article, 'journal': article.journal, 'issue': article.issue, 'html': html, 'citation_pdf_url': citation_pdf_url, 'citation_xml_url': citation_xml_url, 'article_lang': qs_lang, 'text_versions': text_versions, 'related_links': controllers.related_links(article), 'gs_abstract': gs_abstract, 'part': part, } return render_template("article/detail.html", **context) def _handle_pdf(): if not article.pdfs: abort(404, _('PDF do Artigo não encontrado')) pdf_info = [pdf for pdf in article.pdfs if pdf['lang'] == qs_lang] if len(pdf_info) != 1: abort(404, _('PDF do Artigo não encontrado')) try: pdf_url = pdf_info[0]['url'] except (IndexError, KeyError, ValueError, TypeError): abort(404, _('PDF do Artigo não encontrado')) if pdf_url: return get_pdf_content(pdf_url) raise abort(404, _('Recurso do Artigo não encontrado. 
Caminho inválido!')) def _handle_xml(): if current_app.config["SSM_XML_URL_REWRITE"]: result = fetch_data(use_ssm_url(article.xml)) else: result = fetch_data(article.xml) response = make_response(result) response.headers['Content-Type'] = 'application/xml' return response if 'html' == qs_format: return _handle_html() elif 'pdf' == qs_format: return _handle_pdf() elif 'xml' == qs_format: return _handle_xml() else: abort(400, _('Formato não suportado')) @main.route('/readcube/epdf/') @main.route('/readcube/epdf.php') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def article_epdf(): doi = request.args.get('doi', None, type=str) pid = request.args.get('pid', None, type=str) pdf_path = request.args.get('pdf_path', None, type=str) lang = request.args.get('lang', None, type=str) if not all([doi, pid, pdf_path, lang]): abort(400, _('Parâmetros insuficientes para obter o EPDF do artigo')) else: context = { 'doi': doi, 'pid': pid, 'pdf_path': pdf_path, 'lang': lang, } return render_template("article/epdf.html", **context) def get_pdf_content(url): logger.debug("Get PDF: %s", url) if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]: url = use_ssm_url(url) try: response = fetch_data(url) except NonRetryableError: abort(404, _('PDF não encontrado')) except RetryableError: abort(500, _('Erro inesperado')) else: mimetype, __ = mimetypes.guess_type(url) return Response(response, mimetype=mimetype) @cache.cached(key_prefix=cache_key_with_lang_with_qs) def get_content_from_ssm(resource_ssm_media_path): resource_ssm_full_url = current_app.config['SSM_BASE_URI'] + resource_ssm_media_path url = resource_ssm_full_url.strip() mimetype, __ = mimetypes.guess_type(url) try: ssm_response = fetch_data(url) except NonRetryableError: abort(404, _('Recurso não encontrado')) except RetryableError: abort(500, _('Erro inesperado')) else: return Response(ssm_response, mimetype=mimetype) @main.route('/media/assets/<regex("(.*)"):relative_media_path>') @cache.cached(key_prefix=cache_key_with_lang) def media_assets_proxy(relative_media_path): resource_ssm_path = '{ssm_media_path}{resource_path}'.format( ssm_media_path=current_app.config['SSM_MEDIA_PATH'], resource_path=relative_media_path) return get_content_from_ssm(resource_ssm_path) @main.route('/article/ssm/content/raw/') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def article_ssm_content_raw(): resource_ssm_path = request.args.get('resource_ssm_path', None) if not resource_ssm_path: raise abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!')) else: return get_content_from_ssm(resource_ssm_path) @main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>') @main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>') @main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>') @main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>') @cache.cached(key_prefix=cache_key_with_lang) def article_detail_pdf(url_seg, url_seg_issue, url_seg_article, lang_code=''): """ Padrões esperados: `/pdf/csc/2021.v26suppl1/2557-2558` `/pdf/csc/2021.v26suppl1/2557-2558/en` """ if not lang_code and "." 
not in url_seg_issue: return router_legacy_pdf(url_seg, url_seg_issue, url_seg_article) issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue) if not issue: abort(404, _('Issue não encontrado')) article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article) if not article: abort(404, _('Artigo não encontrado')) req_params = { 'url_seg': article.journal.url_segment, 'article_pid_v3': article.aid, 'format': 'pdf', } if lang_code: req_params['lang'] = lang_code return redirect(url_for('main.article_detail_v3', **req_params), code=301) @main.route('/pdf/<string:journal_acron>/<string:issue_info>/<string:pdf_filename>.pdf') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def router_legacy_pdf(journal_acron, issue_info, pdf_filename): pdf_filename = '%s.pdf' % pdf_filename journal = controllers.get_journal_by_url_seg(journal_acron) if not journal: abort(404, _('Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org')) article = controllers.get_article_by_pdf_filename( journal_acron, issue_info, pdf_filename) if not article: abort(404, _('PDF do artigo não foi encontrado')) return redirect( url_for( 'main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article.aid, format='pdf', lang=article._pdf_lang, ), code=301 ) @main.route('/cgi-bin/fbpe/<string:text_or_abstract>/') @cache.cached(key_prefix=cache_key_with_lang_with_qs) def router_legacy_article(text_or_abstract): pid = request.args.get('pid', None) lng = request.args.get('lng', None) if not (text_or_abstract in ['fbtext', 'fbabs'] and pid): # se tem pid abort(400, _('Requsição inválida ao tentar acessar o artigo com pid: %s' % pid)) article = controllers.get_article_by_pid_v1(pid) if not article: abort(404, _('Artigo não encontrado')) return redirect( url_for( 'main.article_detail_v3', url_seg=article.journal.url_segment, article_pid_v3=article.aid, ), code=301 ) # ###############################E-mail share################################## @main.route("/email_share_ajax/", methods=['POST']) def email_share_ajax(): if not request.is_xhr: abort(400, _('Requisição inválida.')) form = forms.EmailShareForm(request.form) if form.validate(): recipients = [email.strip() for email in form.data['recipients'].split(';') if email.strip() != ''] sent, message = controllers.send_email_share(form.data['your_email'], recipients, form.data['share_url'], form.data['subject'], form.data['comment']) return jsonify({'sent': sent, 'message': str(message), 'fields': [key for key in form.data.keys()]}) else: return jsonify({'sent': False, 'message': form.errors, 'fields': [key for key in form.data.keys()]}) @main.route("/form_mail/", methods=['GET']) def email_form(): context = {'url': request.args.get('url')} return render_template("email/email_form.html", **context) @main.route("/email_error_ajax/", methods=['POST']) def email_error_ajax(): if not request.is_xhr: abort(400, _('Requisição inválida.')) form = forms.ErrorForm(request.form) if form.validate(): recipients = [email.strip() for email in current_app.config.get('EMAIL_ACCOUNTS_RECEIVE_ERRORS') if email.strip() != ''] sent, message = controllers.send_email_error(form.data['name'], form.data['your_email'], recipients, form.data['url'], form.data['error_type'], form.data['message'], form.data['page_title']) return jsonify({'sent': sent, 'message': str(message), 'fields': [key for key in form.data.keys()]}) else: return jsonify({'sent': False, 'message': form.errors, 'fields': [key for key in form.data.keys()]}) 
@main.route("/error_mail/", methods=['GET']) def error_form(): context = {'url': request.args.get('url')} return render_template("includes/error_form.html", **context) # ###############################Others######################################## @main.route("/media/<path:filename>/", methods=['GET']) @cache.cached(key_prefix=cache_key_with_lang) def download_file_by_filename(filename): media_root = current_app.config['MEDIA_ROOT'] return send_from_directory(media_root, filename) @main.route("/img/scielo.gif", methods=['GET']) def full_text_image(): return send_from_directory('static', 'img/full_text_scielo_img.gif') @main.route("/robots.txt", methods=['GET']) def get_robots_txt_file(): return send_from_directory('static', 'robots.txt') @main.route("/revistas/<path:journal_seg>/<string:page>.htm", methods=['GET']) def router_legacy_info_pages(journal_seg, page): """ Essa view function realiza o redirecionamento das URLs antigas para as novas URLs. Mantém um dicionário como uma tabela relacionamento entre o nome das páginas que pode ser: Página âncora [iaboutj.htm, eaboutj.htm, paboutj.htm] -> #about [iedboard.htm, eedboard.htm, pedboard.htm] -> #editors [iinstruc.htm einstruc.htm, pinstruc.htm]-> #instructions isubscrp.htm -> Sem âncora """ page_anchor = { 'iaboutj': '#about', 'eaboutj': '#about', 'paboutj': '#about', 'eedboard': '#editors', 'iedboard': '#editors', 'pedboard': '#editors', 'iinstruc': '#instructions', 'pinstruc': '#instructions', 'einstruc': '#instructions' } return redirect('%s%s' % (url_for('main.about_journal', url_seg=journal_seg), page_anchor.get(page, '')), code=301) @main.route("/api/v1/counter_dict", methods=['GET']) def router_counter_dicts(): """ Essa view function retorna um dicionário, em formato JSON, que mapeia PIDs a insumos necessários para o funcionamento das aplicações Matomo & COUNTER & SUSHI. 
""" end_date = request.args.get('end_date', '', type=str) try: end_date = datetime.strptime(end_date, '%Y-%m-%d') except ValueError: end_date = datetime.now() begin_date = end_date - timedelta(days=30) page = request.args.get('page', type=int) if not page: page = 1 limit = request.args.get('limit', type=int) if not limit or limit > 100 or limit < 0: limit = 100 results = {'dictionary_date': end_date, 'end_date': end_date.strftime('%Y-%m-%d %H-%M-%S'), 'begin_date': begin_date.strftime('%Y-%m-%d %H-%M-%S'), 'documents': {}, 'collection': current_app.config['OPAC_COLLECTION']} articles = controllers.get_articles_by_date_range(begin_date, end_date, page, limit) for a in articles.items: results['documents'].update(get_article_counter_data(a)) results['total'] = articles.total results['pages'] = articles.pages results['limit'] = articles.per_page results['page'] = articles.page return jsonify(results) def get_article_counter_data(article): return { article.aid: { "journal_acronym": article.journal.acronym, "pid": article.pid if article.pid else '', "aop_pid": article.aop_pid if article.aop_pid else '', "pid_v1": article.scielo_pids.get('v1', ''), "pid_v2": article.scielo_pids.get('v2', ''), "pid_v3": article.scielo_pids.get('v3', ''), "publication_date": article.publication_date, "default_language": article.original_language, "create": article.created, "update": article.updated } } @main.route('/cgi-bin/wxis.exe/iah/') def author_production(): # http://www.scielo.br/cgi-bin/wxis.exe/iah/ # ?IsisScript=iah/iah.xis&base=article%5Edlibrary&format=iso.pft& # lang=p&nextAction=lnk& # indexSearch=AU&exprSearch=MEIERHOFFER,+LILIAN+KOZSLOWSKI # -> # //search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI search_url = current_app.config.get('URL_SEARCH') if not search_url: abort(404, "URL_SEARCH: {}".format(_('Página não encontrada'))) qs_exprSearch = request.args.get('exprSearch', type=str) or '' qs_indexSearch = request.args.get('indexSearch', type=str) or '' qs_lang = request.args.get('lang', type=str) or '' _lang = IAHX_LANGS.get(qs_lang) or '' _lang = _lang and "lang={}".format(_lang) _expr = "{}{}".format( qs_indexSearch == "AU" and "au:" or '', qs_exprSearch) _expr = _expr and "q={}".format(_expr.replace(" ", "+")) _and = _lang and _expr and "&" or '' _question_mark = (_lang or _expr) and "?" or "" if search_url.startswith("//"): protocol = "https:" elif search_url.startswith("http"): protocol = "" else: protocol = "https://" url = "{}{}{}{}{}{}".format( protocol, search_url, _question_mark, _lang, _and, _expr) return redirect(url, code=301)
34.957888
128
0.625358
407
0.006883
0
0
50,774
0.858641
0
0
13,945
0.235824
714ebaf58f896dbaa65742bb16b60c72d8438768
252
py
Python
create_read_write_1/Writing/to_csv.py
CodeXfull/Pandas
08b0adc28eedba47f6eb8303ba6a36a37ababb92
[ "MIT" ]
null
null
null
create_read_write_1/Writing/to_csv.py
CodeXfull/Pandas
08b0adc28eedba47f6eb8303ba6a36a37ababb92
[ "MIT" ]
null
null
null
create_read_write_1/Writing/to_csv.py
CodeXfull/Pandas
08b0adc28eedba47f6eb8303ba6a36a37ababb92
[ "MIT" ]
null
null
null
""" Converter um DataFrame para CSV """ import pandas as pd dataset = pd.DataFrame({'Frutas': ["Abacaxi", "Mamão"], "Nomes": ["Éverton", "Márcia"]}, index=["Linha 1", "Linha 2"]) dataset.to_csv("dataset.csv")
25.2
55
0.543651
0
0
0
0
0
0
0
0
121
0.47451
714ec7d33bab5008ec611874fc87d94cc9deca3c
9,769
py
Python
venv/Lib/site-packages/pygsheets/client.py
13rilliant/Python-CMS
56c4f3f1cbdd81020aa690ab92d0e26d042458c1
[ "MIT" ]
1
2019-04-22T14:22:38.000Z
2019-04-22T14:22:38.000Z
venv/Lib/site-packages/pygsheets/client.py
13rilliant/Python-Updates-Text-Files-from-Sheets
56c4f3f1cbdd81020aa690ab92d0e26d042458c1
[ "MIT" ]
null
null
null
venv/Lib/site-packages/pygsheets/client.py
13rilliant/Python-Updates-Text-Files-from-Sheets
56c4f3f1cbdd81020aa690ab92d0e26d042458c1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-. import re import warnings import os import logging from pygsheets.drive import DriveAPIWrapper from pygsheets.sheet import SheetAPIWrapper from pygsheets.spreadsheet import Spreadsheet from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound from pygsheets.custom_types import ValueRenderOption, DateTimeRenderOption from google_auth_httplib2 import AuthorizedHttp GOOGLE_SHEET_CELL_UPDATES_LIMIT = 50000 _url_key_re_v1 = re.compile(r'key=([^&#]+)') _url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)") _email_patttern = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\.\w+)\"?") # _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) _deprecated_keyword_mapping = { 'parent_id': 'folder', } class Client(object): """Create or access Google spreadsheets. Exposes members to create new spreadsheets or open existing ones. Use `authorize` to instantiate an instance of this class. >>> import pygsheets >>> c = pygsheets.authorize() The sheet API service object is stored in the sheet property and the drive API service object in the drive property. >>> c.sheet.get('<SPREADSHEET ID>') >>> c.drive.delete('<FILE ID>') :param credentials: The credentials object returned by google-auth or google-auth-oauthlib. :param retries: (Optional) Number of times to retry a connection before raising a TimeOut error. Default: 3 :param http: The underlying HTTP object to use to make requests. If not specified, a :class:`httplib2.Http` instance will be constructed. """ spreadsheet_cls = Spreadsheet def __init__(self, credentials, retries=3, http=None): self.oauth = credentials self.logger = logging.getLogger(__name__) http = AuthorizedHttp(credentials, http=http) data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data") self.sheet = SheetAPIWrapper(http, data_path, retries=retries) self.drive = DriveAPIWrapper(http, data_path) @property def teamDriveId(self): """ Enable team drive support Deprecated: use client.drive.enable_team_drive(team_drive_id=?) """ return self.drive.team_drive_id @teamDriveId.setter def teamDriveId(self, value): warnings.warn("Depricated please use drive.enable_team_drive") self.drive.enable_team_drive(value) def spreadsheet_ids(self, query=None): """Get a list of all spreadsheet ids present in the Google Drive or TeamDrive accessed.""" return [x['id'] for x in self.drive.spreadsheet_metadata(query)] def spreadsheet_titles(self, query=None): """Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed.""" return [x['name'] for x in self.drive.spreadsheet_metadata(query)] def create(self, title, template=None, folder=None, **kwargs): """Create a new spreadsheet. The title will always be set to the given value (even overwriting the templates title). The template can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_ or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored. :param title: Title of the new spreadsheet. :param template: A template to create the new spreadsheet from. :param folder: The Id of the folder this sheet will be stored in. :param kwargs: Standard parameters (see reference for details). 
:return: :class:`~pygsheets.Spreadsheet` """ result = self.sheet.create(title, template=template, **kwargs) if folder: self.drive.move_file(result['spreadsheetId'], old_folder=self.drive.spreadsheet_metadata(query="name = '" + title + "'")[0]['parents'][0], new_folder=folder) return self.spreadsheet_cls(self, jsonsheet=result) def open(self, title): """Open a spreadsheet by title. In a case where there are several sheets with the same title, the first one found is returned. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open('TestSheet') :param title: A title of a spreadsheet. :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found. """ try: spreadsheet = list(filter(lambda x: x['name'] == title, self.drive.spreadsheet_metadata()))[0] return self.open_by_key(spreadsheet['id']) except (KeyError, IndexError): raise SpreadsheetNotFound('Could not find a spreadsheet with title %s.' % title) def open_by_key(self, key): """Open a spreadsheet by key. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE') :param key: The key of a spreadsheet. (can be found in the sheet URL) :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found. """ response = self.sheet.get(key, fields='properties,sheets/properties,spreadsheetId,namedRanges', includeGridData=False) return self.spreadsheet_cls(self, response) def open_by_url(self, url): """Open a spreadsheet by URL. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl') :param url: URL of a spreadsheet as it appears in a browser. :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL. """ m1 = _url_key_re_v1.search(url) if m1: return self.open_by_key(m1.group(1)) else: m2 = _url_key_re_v2.search(url) if m2: return self.open_by_key(m2.group(1)) else: raise NoValidUrlKeyFound def open_all(self, query=''): """Opens all available spreadsheets. Result can be filtered when specifying the query parameter. On the details on how to form the query: `Reference <https://developers.google.com/drive/v3/web/search-parameters>`_ :param query: (Optional) Can be used to filter the returned metadata. :returns: A list of :class:`~pygsheets.Spreadsheet`. """ return [self.open_by_key(key) for key in self.spreadsheet_ids(query=query)] def open_as_json(self, key): """Return a json representation of the spreadsheet. See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details. """ return self.sheet.get(key, fields='properties,sheets/properties,sheets/protectedRanges,' 'spreadsheetId,namedRanges', includeGridData=False) def get_range(self, spreadsheet_id, value_range, major_dimension='ROWS', value_render_option=ValueRenderOption.FORMATTED_VALUE, date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER): """Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range. Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__ :param spreadsheet_id: The ID of the spreadsheet to retrieve data from. :param value_range: The A1 notation of the values to retrieve. :param major_dimension: The major dimension that results should use. 
For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]], whereas requesting range=A1:B2,majorDimension=COLUMNS will return [[1,3],[2,4]]. :param value_render_option: How values should be represented in the output. The default render option is `ValueRenderOption.FORMATTED_VALUE`. :param date_time_render_option: How dates, times, and durations should be represented in the output. This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`]. :return: An array of arrays with the values fetched. Returns an empty array if no values were fetched. Values are dynamically typed as int, float or string. """ result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option, date_time_render_option) try: return result['values'] except KeyError: return [['']]
46.519048
142
0.614085
8,988
0.920053
0
0
372
0.03808
0
0
6,154
0.629952
714ecc8f34f21f3f5078c51278dfea154ffd4835
1,511
py
Python
model/group_contact.py
NatalyAristova/Training_python
e95a2b9e25238285d705a880fd94d73f173c3a31
[ "Apache-2.0" ]
null
null
null
model/group_contact.py
NatalyAristova/Training_python
e95a2b9e25238285d705a880fd94d73f173c3a31
[ "Apache-2.0" ]
null
null
null
model/group_contact.py
NatalyAristova/Training_python
e95a2b9e25238285d705a880fd94d73f173c3a31
[ "Apache-2.0" ]
null
null
null
from sys import maxsize


class Group_contact:

    def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None, title=None,
                 company=None, address=None, home=None, mobile=None, work=None, fax=None,
                 email=None, email2=None, email3=None, byear=None, address2=None, phone2=None,
                 notes=None, all_phones_from_home_page=None, id=None, all_emails_from_home_page=None):
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.nickname = nickname
        self.title = title
        self.company = company
        self.address = address
        self.home = home
        self.mobile = mobile
        self.work = work
        self.fax = fax
        self.email = email
        self.email2 = email2
        self.email3 = email3
        self.byear = byear
        self.address2 = address2
        self.phone2 = phone2
        self.notes = notes
        self.id = id
        self.all_phones_from_home_page = all_phones_from_home_page
        self.all_emails_from_home_page = all_emails_from_home_page

    def __repr__(self):
        return "%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname,
                                      self.middlename, self.nickname, self.title)

    def __eq__(self, other):
        return (self.id is None or other.id is None or self.id == other.id) and \
               (self.lastname, self.firstname) == (other.lastname, other.firstname)

    def id_or_max(self):
        if self.id:
            return int(self.id)
        else:
            return maxsize
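# A small hedged usage sketch (not from the original repository): id_or_max() is the
# kind of helper typically used as a sort key so contacts without an id sort last.
# The sample names and id below are invented for illustration only.
contacts = [Group_contact(firstname="Ann", lastname="Lee", id="3"),
            Group_contact(firstname="Bob", lastname="Roy")]
ordered = sorted(contacts, key=Group_contact.id_or_max)
print(ordered)  # the contact without an id ends up last (id_or_max() returns maxsize)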
36.853659
148
0.650563
1,485
0.982793
0
0
0
0
0
0
19
0.012574
714f78bb4bb01676183ee7d2b3639573c3d0ac56
712
py
Python
test/manual/documents/test_iter_documents.py
membranepotential/mendeley-python-sdk
0336f0164f4d409309e813cbd0140011b5b2ff8f
[ "Apache-2.0" ]
103
2015-01-12T00:40:51.000Z
2022-03-29T07:02:06.000Z
test/manual/documents/test_iter_documents.py
membranepotential/mendeley-python-sdk
0336f0164f4d409309e813cbd0140011b5b2ff8f
[ "Apache-2.0" ]
26
2015-01-10T04:08:41.000Z
2021-02-05T16:31:37.000Z
test/manual/documents/test_iter_documents.py
membranepotential/mendeley-python-sdk
0336f0164f4d409309e813cbd0140011b5b2ff8f
[ "Apache-2.0" ]
43
2015-03-04T18:11:06.000Z
2022-03-13T02:33:34.000Z
from itertools import islice

from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document


def test_should_iterate_through_documents():
    session = get_user_session()
    delete_all_documents()

    with cassette('fixtures/resources/documents/iter_documents/iterate_through_documents.yaml'):
        create_document(session, 'title 1')
        create_document(session, 'title 2')
        create_document(session, 'title 3')

        docs = list(islice(session.documents.iter(page_size=2), 3))

        assert len(docs) == 3
        assert docs[0].title == 'title 1'
        assert docs[1].title == 'title 2'
        assert docs[2].title == 'title 3'
32.363636
96
0.706461
0
0
0
0
0
0
0
0
130
0.182584
714fe59976a41e4840adb621109e180ee047b25c
5,567
py
Python
demo.py
cbsudux/minimal-hand
893c252e7e818a9a96b279023ea8a78a88fb0a4d
[ "MIT" ]
null
null
null
demo.py
cbsudux/minimal-hand
893c252e7e818a9a96b279023ea8a78a88fb0a4d
[ "MIT" ]
null
null
null
demo.py
cbsudux/minimal-hand
893c252e7e818a9a96b279023ea8a78a88fb0a4d
[ "MIT" ]
null
null
null
import argparse import cv2 import keyboard import numpy as np import open3d as o3d import os import pygame from transforms3d.axangles import axangle2mat import config from hand_mesh import HandMesh from kinematics import mpii_to_mano from utils import OneEuroFilter, imresize from wrappers import ModelPipeline from utils import * def video_to_images(vid_file, img_folder=None, return_info=False): if img_folder is None: img_folder = osp.join('/tmp', osp.basename(vid_file).replace('.', '_')) os.makedirs(img_folder, exist_ok=True) command = ['ffmpeg', '-i', vid_file, '-f', 'image2', '-v', 'error', f'{img_folder}/%06d.png'] print(f'Running \"{" ".join(command)}\"') subprocess.call(command) print(f'Images saved to \"{img_folder}\"') img_shape = cv2.imread(osp.join(img_folder, '000001.png')).shape if return_info: return img_folder, len(os.listdir(img_folder)), img_shape else: return img_folder def run(args): ############ output visualization ############ # view_mat = axangle2mat([1, 0, 0], np.pi) # align different coordinate systems # window_size = 1080 # hand_mesh = HandMesh(config.HAND_MESH_MODEL_PATH) # mesh = o3d.geometry.TriangleMesh() # mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces) # mesh.vertices = \ # o3d.utility.Vector3dVector(np.matmul(view_mat, hand_mesh.verts.T).T * 1000) # mesh.compute_vertex_normals() # viewer = o3d.visualization.Visualizer() # viewer.create_window( # width=window_size + 1, height=window_size + 1, # window_name='Minimal Hand - output' # ) # viewer.add_geometry(mesh) # view_control = viewer.get_view_control() # cam_params = view_control.convert_to_pinhole_camera_parameters() # extrinsic = cam_params.extrinsic.copy() # extrinsic[0:3, 3] = 0 # cam_params.extrinsic = extrinsic # cam_params.intrinsic.set_intrinsics( # window_size + 1, window_size + 1, config.CAM_FX, config.CAM_FY, # window_size // 2, window_size // 2 # ) # view_control.convert_from_pinhole_camera_parameters(cam_params) # view_control.set_constant_z_far(1000) # render_option = viewer.get_render_option() # render_option.load_from_json('./render_option.json') # viewer.update_renderer() # ############ input visualization ############ # pygame.init() # display = pygame.display.set_mode((window_size, window_size)) # pygame.display.set_caption('Minimal Hand - input') # ############ misc ############ # mesh_smoother = OneEuroFilter(4.0, 0.0) # clock = pygame.time.Clock() ############ Move all of above code to local to render ########### video_file = args.vid_file if not os.path.isfile(video_file): exit(f'Input video \"{video_file}\" does not exist!') output_path = os.path.join(args.output_folder, os.path.basename(video_file).replace('.mp4', '')) os.makedirs(output_path, exist_ok=True) image_folder, num_frames, img_shape = video_to_images(video_file, return_info=True) print(f'Input video number of frames {num_frames}') orig_height, orig_width = img_shape[:2] # total_time = time.time() import pdb; pdb.set_trace() image_file_names = [ osp.join(image_folder, x) for x in os.listdir(image_folder) if x.endswith('.png') or x.endswith('.jpg') ] model = ModelPipeline() for i in image_file_names: # What do all these conditions check for? frame_large = x if frame_large is None: continue if frame_large.shape[0] > frame_large.shape[1]: margin = int((frame_large.shape[0] - frame_large.shape[1]) / 2) frame_large = frame_large[margin:-margin] else: margin = int((frame_large.shape[1] - frame_large.shape[0]) / 2) frame_large = frame_large[:, margin:-margin] frame_large = np.flip(frame_large, axis=1).copy() # why? 
Camera flip? frame = imresize(frame_large, (128, 128)) # needed ######## Golden lines, run this here ######### _, theta_mpii = model.process(frame) theta_mano = mpii_to_mano(theta_mpii) ######## Save theta_mano and pass as input to local ######## v = hand_mesh.set_abs_quat(theta_mano) v *= 2 # for better visualization v = v * 1000 + np.array([0, 0, 400]) v = mesh_smoother.process(v) mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces) mesh.vertices = o3d.utility.Vector3dVector(np.matmul(view_mat, v.T).T) mesh.paint_uniform_color(config.HAND_COLOR) mesh.compute_triangle_normals() mesh.compute_vertex_normals() # for some version of open3d you may need `viewer.update_geometry(mesh)` viewer.update_geometry() viewer.poll_events() display.blit( pygame.surfarray.make_surface( np.transpose( imresize(frame_large, (window_size, window_size) ), (1, 0, 2)) ), (0, 0) ) pygame.display.update() if keyboard.is_pressed("esc"): break clock.tick(30) # What's this do? If it adds delay remove it if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--vid_file', type=str, help='input video path or youtube link') args = parser.parse_args() run(args)
33.136905
100
0.629064
0
0
0
0
0
0
0
0
2,188
0.39303
715027948c136a1c6e6c296495419c7112dea3be
1,929
py
Python
test_project/settings.py
incuna/incuna-groups
148c181faf66fe73792cb2c5bbf5500ba61aa22d
[ "BSD-2-Clause" ]
1
2017-09-29T23:58:02.000Z
2017-09-29T23:58:02.000Z
test_project/settings.py
incuna/incuna-groups
148c181faf66fe73792cb2c5bbf5500ba61aa22d
[ "BSD-2-Clause" ]
51
2015-03-30T08:58:15.000Z
2022-01-13T00:40:17.000Z
test_project/settings.py
incuna/incuna-groups
148c181faf66fe73792cb2c5bbf5500ba61aa22d
[ "BSD-2-Clause" ]
null
null
null
import os

import dj_database_url

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

DEBUG = True

ALLOWED_HOSTS = []

ROOT_URLCONF = 'groups.tests.urls'

STATIC_URL = '/static/'

SECRET_KEY = 'krc34ji^-fd-=+r6e%p!0u0k9h$9!q*_#l=6)74h#o(jrxsx4p'

PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)

DATABASES = {
    'default': dj_database_url.config(default='postgres://localhost/groups')
}

DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'

INSTALLED_APPS = (
    'groups',
    'crispy_forms',
    'pagination',
    'polymorphic',
    # Put contenttypes before auth to work around test issue.
    # See: https://code.djangoproject.com/ticket/10827#comment:12
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'groups', 'tests', 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.request',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

CRISPY_TEMPLATE_PACK = 'bootstrap3'

TEST_RUNNER = 'test_project.test_runner.Runner'
28.367647
76
0.659927
0
0
0
0
0
0
0
0
1,148
0.595127
7150fca7ddfd290e2618756c7d1c3d98b7e62c0b
11,824
py
Python
tests/test_akismet.py
cclauss/akismet
7b65bc163d6947a3013d01bf9accf1bc6c0781ca
[ "BSD-3-Clause" ]
9
2015-07-21T01:43:05.000Z
2021-04-01T12:53:32.000Z
tests/test_akismet.py
cclauss/akismet
7b65bc163d6947a3013d01bf9accf1bc6c0781ca
[ "BSD-3-Clause" ]
3
2015-09-28T09:01:17.000Z
2021-11-18T08:19:36.000Z
tests/test_akismet.py
cclauss/akismet
7b65bc163d6947a3013d01bf9accf1bc6c0781ca
[ "BSD-3-Clause" ]
7
2015-09-27T03:14:44.000Z
2021-12-05T22:48:44.000Z
import datetime import os import sys import unittest from unittest import mock import akismet class AkismetTests(unittest.TestCase): api_key = os.getenv("TEST_AKISMET_API_KEY") blog_url = os.getenv("TEST_AKISMET_BLOG_URL") api_key_env_var = "PYTHON_AKISMET_API_KEY" blog_url_env_var = "PYTHON_AKISMET_BLOG_URL" def setUp(self): self.api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url) class AkismetConfigurationTests(AkismetTests): """ Tests configuration of the Akismet class. """ def test_config_from_args(self): """ Configuring via explicit arguments succeeds. """ api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url) self.assertEqual(self.api_key, api.api_key) self.assertEqual(self.blog_url, api.blog_url) def test_bad_config_args(self): """ Configuring with bad arguments fails. """ with self.assertRaises(akismet.APIKeyError): akismet.Akismet(key="invalid", blog_url="http://invalid") def test_config_from_env(self): """ Configuring via environment variables succeeds. """ try: os.environ[self.api_key_env_var] = self.api_key os.environ[self.blog_url_env_var] = self.blog_url api = akismet.Akismet(key=None, blog_url=None) self.assertEqual(self.api_key, api.api_key) self.assertEqual(self.blog_url, api.blog_url) api = akismet.Akismet() self.assertEqual(self.api_key, api.api_key) self.assertEqual(self.blog_url, api.blog_url) finally: os.environ[self.api_key_env_var] = "" os.environ[self.blog_url_env_var] = "" def test_bad_config_env(self): """ Configuring with bad environment variables fails. """ try: os.environ[self.api_key_env_var] = "invalid" os.environ[self.blog_url_env_var] = "http://invalid" with self.assertRaises(akismet.APIKeyError): akismet.Akismet() finally: os.environ[self.api_key_env_var] = "" os.environ[self.blog_url_env_var] = "" def test_bad_url(self): """ Configuring with a bad URL fails. """ bad_urls = ( "example.com", "ftp://example.com", "www.example.com", "http//example.com", "https//example.com", ) for url in bad_urls: with self.assertRaises(akismet.ConfigurationError): akismet.Akismet(key=self.api_key, blog_url=url) def test_missing_config(self): """ Instantiating without any configuration fails. """ with self.assertRaises(akismet.ConfigurationError): akismet.Akismet(key=None, blog_url=None) with self.assertRaises(akismet.ConfigurationError): akismet.Akismet() def test_user_agent(self): """ The Akismet class creates the correct user-agent string. """ api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url) expected_agent = "Python/{} | akismet.py/{}".format( "{}.{}".format(*sys.version_info[:2]), akismet.__version__ ) self.assertEqual(expected_agent, api.user_agent_header["User-Agent"]) class AkismetAPITests(AkismetTests): """ Tests implementation of the Akismet API. """ base_kwargs = { "user_ip": "127.0.0.1", "user_agent": "Mozilla", # Always send this when testing; Akismet recognizes it as a # test query and does not train/learn from it. "is_test": 1, } def test_verify_key_valid(self): """ The verify_key operation succeeds with a valid key and URL. """ self.assertTrue(akismet.Akismet.verify_key(self.api_key, self.blog_url)) def test_verify_key_invalid(self): """ The verify_key operation fails with an invalid key and URL. """ self.assertFalse(akismet.Akismet.verify_key("invalid", "http://invalid")) def test_comment_check_spam(self): """ The comment_check method correctly identifies spam. """ check_kwargs = { # Akismet guarantees this will be classified spam. 
"comment_author": "viagra-test-123", **self.base_kwargs, } self.assertTrue(self.api.comment_check(**check_kwargs)) def test_comment_check_not_spam(self): """ The comment_check method correctly identifies non-spam. """ check_kwargs = { # Akismet guarantees this will not be classified spam. "user_role": "administrator", **self.base_kwargs, } self.assertFalse(self.api.comment_check(**check_kwargs)) def test_submit_spam(self): """ The submit_spam method succeeds. """ spam_kwargs = { "comment_type": "comment", "comment_author": "viagra-test-123", "comment_content": "viagra-test-123", **self.base_kwargs, } self.assertTrue(self.api.submit_spam(**spam_kwargs)) def test_submit_ham(self): """ The submit_ham method succeeds. """ ham_kwargs = { "comment_type": "comment", "comment_author": "Legitimate Author", "comment_content": "This is a legitimate comment.", "user_role": "administrator", **self.base_kwargs, } self.assertTrue(self.api.submit_ham(**ham_kwargs)) def test_unexpected_verify_key_response(self): """ Unexpected verify_key API responses are correctly handled. """ post_mock = mock.MagicMock() with mock.patch("requests.post", post_mock): with self.assertRaises(akismet.ProtocolError): akismet.Akismet.verify_key(self.api_key, self.blog_url) def test_unexpected_comment_check_response(self): """ Unexpected comment_check API responses are correctly handled. """ post_mock = mock.MagicMock() with mock.patch("requests.post", post_mock): with self.assertRaises(akismet.ProtocolError): check_kwargs = {"comment_author": "viagra-test-123", **self.base_kwargs} self.api.comment_check(**check_kwargs) def test_unexpected_submit_spam_response(self): """ Unexpected submit_spam API responses are correctly handled. """ post_mock = mock.MagicMock() with mock.patch("requests.post", post_mock): with self.assertRaises(akismet.ProtocolError): spam_kwargs = { "comment_type": "comment", "comment_author": "viagra-test-123", "comment_content": "viagra-test-123", **self.base_kwargs, } self.api.submit_spam(**spam_kwargs) def test_unexpected_submit_ham_response(self): """ Unexpected submit_ham API responses are correctly handled. """ post_mock = mock.MagicMock() with mock.patch("requests.post", post_mock): with self.assertRaises(akismet.ProtocolError): ham_kwargs = { "comment_type": "comment", "comment_author": "Legitimate Author", "comment_content": "This is a legitimate comment.", "user_role": "administrator", **self.base_kwargs, } self.api.submit_ham(**ham_kwargs) class AkismetRequestTests(AkismetTests): """ Tests the requests constructed by the Akismet class. """ def _get_mock(self, text): """ Create a mock for requests.post() returning expected text. """ post_mock = mock.MagicMock() post_mock.return_value.text = text return post_mock def _mock_request(self, method, endpoint, text, method_kwargs): """ Issue a mocked request and verify requests.post() was called with the correct arguments. """ method_kwargs.update(user_ip="127.0.0.1", user_agent="Mozilla", is_test=1) expected_kwargs = {"blog": self.blog_url, **method_kwargs} post_mock = self._get_mock(text) with mock.patch("requests.post", post_mock): getattr(self.api, method)(**method_kwargs) post_mock.assert_called_with( endpoint.format(self.api_key), data=expected_kwargs, headers=akismet.Akismet.user_agent_header, ) def test_verify_key(self): """ The request issued by verify_key() is correct. 
""" post_mock = self._get_mock("valid") with mock.patch("requests.post", post_mock): akismet.Akismet.verify_key(self.api_key, self.blog_url) post_mock.assert_called_with( akismet.Akismet.VERIFY_KEY_URL, data={"key": self.api_key, "blog": self.blog_url}, headers=akismet.Akismet.user_agent_header, ) def test_comment_check(self): """ The request issued by comment_check() is correct. """ self._mock_request( "comment_check", akismet.Akismet.COMMENT_CHECK_URL, "true", {"comment_author": "viagra-test-123"}, ) def test_submit_spam(self): """ The request issued by submit_spam() is correct. """ self._mock_request( "submit_spam", akismet.Akismet.SUBMIT_SPAM_URL, akismet.Akismet.SUBMIT_SUCCESS_RESPONSE, {"comment_content": "Bad comment", "comment_author": "viagra-test-123"}, ) def test_submit_ham(self): """ The request issued by submit_ham() is correct. """ self._mock_request( "submit_ham", akismet.Akismet.SUBMIT_HAM_URL, akismet.Akismet.SUBMIT_SUCCESS_RESPONSE, { "comment_content": "Good comment", "comment_author": "Legitimate commenter", }, ) def test_full_kwargs(self): """ All optional Akismet arguments are correctly passed through. """ modified_timestamp = datetime.datetime.now() posted_timestamp = modified_timestamp - datetime.timedelta(seconds=30) full_kwargs = { "referrer": "http://www.example.com/", "permalink": "http://www.example.com/#comment123", "comment_type": "comment", "comment_author": "Legitimate Author", "comment_author_email": "[email protected]", "comment_author_url": "http://www.example.com/", "comment_content": "This is a fine comment.", "comment_date_gmt": posted_timestamp.isoformat(), "comment_post_modified_gmt": modified_timestamp.isoformat(), "blog_lang": "en_us", "blog_charset": "utf-8", "user_role": "administrator", "recheck_reason": "edit", } self._mock_request( "comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", full_kwargs ) def test_unknown_kwargs(self): """ Unknown Akismet arguments are correctly rejected. """ bad_kwargs = {"bad_arg": "bad_val"} with self.assertRaises(akismet.UnknownArgumentError): self._mock_request( "comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", bad_kwargs )
31.87062
88
0.58931
11,717
0.990951
0
0
0
0
0
0
4,008
0.338972
7151993c0f8145d0e1fdf8168c7b895118af0892
2,581
py
Python
experimenting/dataset/datamodule.py
gaurvigoyal/lifting_events_to_3d_hpe
66d27eb7534f81a95d9f68e17cc534ef2a2c9b1c
[ "Apache-2.0" ]
19
2021-04-16T11:43:34.000Z
2022-01-07T10:21:42.000Z
experimenting/dataset/datamodule.py
gaurvigoyal/lifting_events_to_3d_hpe
66d27eb7534f81a95d9f68e17cc534ef2a2c9b1c
[ "Apache-2.0" ]
4
2021-04-16T14:07:38.000Z
2022-02-12T16:35:22.000Z
experimenting/dataset/datamodule.py
gianscarpe/event-camera
8bb60a281adb9e2c961b5e12c24c9bbbba1876d5
[ "Apache-2.0" ]
5
2021-04-23T16:30:37.000Z
2022-02-12T01:42:14.000Z
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset

from .core import BaseCore
from .factory import BaseDataFactory


class DataModule(pl.LightningDataModule):
    def __init__(
        self,
        dataset_factory: BaseDataFactory,
        core: BaseCore,
        aug_train_config,
        aug_test_config,
        batch_size: int,
        num_workers: int,
        train_val_split: float = 0.8,
    ):
        super().__init__()
        self.core = core
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.dataset_factory = dataset_factory
        self.aug_train_config = aug_train_config
        self.aug_test_config = aug_test_config
        self.train_val_split = train_val_split

    def prepare_data(self, *args, **kwargs):
        pass

    def setup(self, stage=None):
        self.dataset_factory.set_dataset_core(self.core)
        (
            self.train_indexes,
            self.val_indexes,
            self.test_indexes,
        ) = self.dataset_factory.get_train_test_split(self.train_val_split)

        self.train_dataset = self.dataset_factory.get_dataset(
            self.train_indexes, self.aug_train_config
        )
        self.val_dataset = self.dataset_factory.get_dataset(
            self.val_indexes, self.aug_test_config
        )
        self.test_dataset = self.dataset_factory.get_dataset(
            self.test_indexes, self.aug_test_config
        )

    def train_dataloader(self):
        return get_dataloader(self.train_dataset, self.batch_size, self.num_workers)

    def val_dataloader(self):
        return get_dataloader(
            self.val_dataset,
            self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

    def test_dataloader(self):
        return get_dataloader(
            self.test_dataset,
            self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

    def test_frames_only_dataloader(self):
        return get_dataloader(
            self.dataset_factory.get_frame_only_dataset(
                self.test_indexes, self.aug_test_config
            ),
            self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )


def get_dataloader(
    dataset: Dataset, batch_size: int, num_workers: int = 12, shuffle=True
) -> DataLoader:
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=True,
    )
    return loader
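# A minimal hedged sketch (not part of the original module): get_dataloader() accepts
# any torch Dataset, so a TensorDataset with random tensors is enough to exercise it.
# The shapes and num_workers=0 are assumptions to keep the example single-process.
import torch
from torch.utils.data import TensorDataset

toy_dataset = TensorDataset(torch.randn(16, 3), torch.randint(0, 2, (16,)))
toy_loader = get_dataloader(toy_dataset, batch_size=4, num_workers=0, shuffle=False)
for features, labels in toy_loader:
    print(features.shape, labels.shape)  # torch.Size([4, 3]) torch.Size([4])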
28.362637
84
0.631538
2,139
0.828749
0
0
0
0
0
0
0
0
7152cc15e7baaacfb5a36373bdeff28f520d9e9f
2,906
py
Python
sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py
giulianoiorio/PeTar
f6a849552b3d8e47c5e08fe90fed05bf38bc407d
[ "MIT" ]
null
null
null
sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py
giulianoiorio/PeTar
f6a849552b3d8e47c5e08fe90fed05bf38bc407d
[ "MIT" ]
null
null
null
sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py
giulianoiorio/PeTar
f6a849552b3d8e47c5e08fe90fed05bf38bc407d
[ "MIT" ]
null
null
null
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

#Load file
dt=pd.read_csv("sevn_output/output_0.csv")
#Give a look to the columns
print(dt.columns)
#Consider only the final states
dt=dt.drop_duplicates(["ID","name"], keep='last')

#Load evolved file
dte=pd.read_csv("sevn_output/evolved_0.dat",sep='\s+')
#Give a look to the columns
print(dte.columns)
dte=dte.rename(columns={'#ID': 'ID','Mass_0':"Mzams_0", 'Mass_1':"Mzams_1"})
#After change
print(dte.columns)

#Join the two dataset
dt = dt.merge(dte, on=["ID","name"], how="inner", suffixes=("","_ini") )
# - on: column(s, can be a list of columns) to match during the merge of the two tables. The colum(s) has(have) to be present in both the tables
# - how: type of join to use, see documentation here and the next slide
# - suffixes: columns with the same name in the two tables (not used in on) will be renamed adding these suffixes.
#Give a look to the columns
print(dt.columns)

#Create filter indexes
idx0 = (dt.RemnantType_0==6)
idx1 = (dt.RemnantType_1==6)
idxb0 = idx0 & dt.Semimajor.notnull()
idxb1 = idx1 & dt.Semimajor.notnull()
idxm0 = idxb0 & (dt.GWtime + dt.BWorldtime <= 14000)
idxm1 = idxb1 & (dt.GWtime + dt.BWorldtime <= 14000)

#Filter and join masses
AllBH = pd.concat([dt[idx0].Mass_0,dt[idx1].Mass_1])
BoundBH = pd.concat([dt[idxb0].Mass_0,dt[idxb1].Mass_1])
MergingBH = pd.concat([dt[idxm0].Mass_0,dt[idxm1].Mass_1])
#Filter and join initial masses
AllBHzams = pd.concat([dt[idx0].Mzams_0,dt[idx1].Mzams_1])
BoundBHzams = pd.concat([dt[idxb0].Mzams_0,dt[idxb1].Mzams_1])
MergingBHzams = pd.concat([dt[idxm0].Mzams_0,dt[idxm1].Mzams_1])
#Filter and join initial semimajor axis
AllBHa = pd.concat([dt[idx0].a,dt[idx1].a])
BoundBHa = pd.concat([dt[idxb0].a,dt[idxb1].a])
MergingBHa = pd.concat([dt[idxm0].a,dt[idxm1].a])

#Plot
plt.figure(figsize=(10,5))

plt.subplot(1,2,1)
plt.scatter(AllBHzams,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHzams,BoundBH,zorder=2,edgecolor="k",s=30, label="Bound")
plt.scatter(MergingBHzams,MergingBH,zorder=3,edgecolor="k",s=30, label="Merging")
plt.plot(np.linspace(0,140),np.linspace(0,140),ls="dashed",c="gray")
plt.xscale("log")
plt.yscale("log")
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.xlabel("$M\mathrm{zams}$ [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.legend(fontsize=16)

plt.subplot(1,2,2)
plt.scatter(AllBHa,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHa,BoundBH,zorder=2,edgecolor="k",s=30,label="Bound")
plt.scatter(MergingBHa,MergingBH,zorder=3,edgecolor="k",s=30,label="Merging")
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Semimajor initial [R$_\odot$]",fontsize=18)
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)

plt.tight_layout()
plt.savefig("analysis3.png")
plt.show()
34.595238
144
0.719202
0
0
0
0
0
0
0
0
1,005
0.345836
7152e1ff12041d507f6d8d481cc402ae12c07a3f
91
py
Python
apps/tg_bot/apps.py
VladimirLazor/Lohika
a36407feeb2e3ade4f8c689030f343d88ff47a92
[ "Apache-2.0" ]
null
null
null
apps/tg_bot/apps.py
VladimirLazor/Lohika
a36407feeb2e3ade4f8c689030f343d88ff47a92
[ "Apache-2.0" ]
9
2021-03-19T15:59:10.000Z
2022-03-12T00:57:56.000Z
apps/tg_bot/apps.py
VladimirLazor/Lohika
a36407feeb2e3ade4f8c689030f343d88ff47a92
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig


class TgBotConfig(AppConfig):
    name = 'apps.tg_bot'
15.166667
33
0.747253
54
0.593407
0
0
0
0
0
0
13
0.142857
71530e1943a52265477429affe05d43b9f82d449
2,152
py
Python
office365/sharepoint/portal/group_site_manager.py
rikeshtailor/Office365-REST-Python-Client
ca7bfa1b22212137bb4e984c0457632163e89a43
[ "MIT" ]
null
null
null
office365/sharepoint/portal/group_site_manager.py
rikeshtailor/Office365-REST-Python-Client
ca7bfa1b22212137bb4e984c0457632163e89a43
[ "MIT" ]
null
null
null
office365/sharepoint/portal/group_site_manager.py
rikeshtailor/Office365-REST-Python-Client
ca7bfa1b22212137bb4e984c0457632163e89a43
[ "MIT" ]
null
null
null
from office365.runtime.client_object import ClientObject
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.group_creation_params import GroupCreationInformation
from office365.sharepoint.portal.group_site_info import GroupSiteInfo


class GroupSiteManager(ClientObject):

    def __init__(self, context):
        super(GroupSiteManager, self).__init__(context, ResourcePath("GroupSiteManager"), None)

    def create_group_ex(self, display_name, alias, is_public, optional_params=None):
        """
        Create a modern site

        :param str display_name:
        :param str alias:
        :param bool is_public:
        :param office365.sharepoint.portal.group_creation_params.GroupCreationParams or None optional_params:
        """
        payload = GroupCreationInformation(display_name, alias, is_public, optional_params)
        result = ClientResult(self.context, GroupSiteInfo())
        qry = ServiceOperationQuery(self, "CreateGroupEx", None, payload, None, result)
        self.context.add_query(qry)
        return result

    def delete(self, site_url):
        """
        Deletes a SharePoint Team site

        :type site_url: str
        """
        payload = {
            "siteUrl": site_url
        }
        qry = ServiceOperationQuery(self, "Delete", None, payload)
        self.context.add_query(qry)
        return self

    def get_status(self, group_id):
        """Get the status of a SharePoint site

        :type group_id: str
        """
        result = ClientResult(self.context, GroupSiteInfo())
        qry = ServiceOperationQuery(self, "GetSiteStatus", None, {'groupId': group_id}, None, result)
        self.context.add_query(qry)

        def _construct_status_request(request):
            request.method = HttpMethod.Get
            request.url += "?groupId='{0}'".format(group_id)

        self.context.before_execute(_construct_status_request)
        return result
37.754386
109
0.701208
1,679
0.780204
0
0
0
0
0
0
497
0.230948
715433d014e2773f3519d53929b4573136138236
186
py
Python
tests/errors/e_tuple_args_T692.py
smok-serwis/cython
e551a3a348888bd89d4aad809916709a634af1fb
[ "Apache-2.0" ]
2
2020-01-29T08:20:22.000Z
2020-01-29T08:20:25.000Z
tests/errors/e_tuple_args_T692.py
smok-serwis/cython
e551a3a348888bd89d4aad809916709a634af1fb
[ "Apache-2.0" ]
1
2019-09-21T19:58:10.000Z
2019-09-21T19:58:10.000Z
tests/errors/e_tuple_args_T692.py
smok-serwis/cython
e551a3a348888bd89d4aad809916709a634af1fb
[ "Apache-2.0" ]
2
2017-06-18T04:09:18.000Z
2018-11-30T20:03:58.000Z
# ticket: 692
# mode: error

def func((a, b)):
    return a + b

_ERRORS = u"""
4:9: Missing argument name
5:11: undeclared name not builtin: a
5:15: undeclared name not builtin: b
"""
14.307692
36
0.645161
0
0
0
0
0
0
0
0
135
0.725806
71548039cb810f86d8a1fe4c36b02cd515b16949
558
py
Python
ble.py
Ladvien/esp32_upython_env
8b0feab940efd3feff16220473e1b5b27d679a56
[ "MIT" ]
null
null
null
ble.py
Ladvien/esp32_upython_env
8b0feab940efd3feff16220473e1b5b27d679a56
[ "MIT" ]
null
null
null
ble.py
Ladvien/esp32_upython_env
8b0feab940efd3feff16220473e1b5b27d679a56
[ "MIT" ]
null
null
null
import bluetooth
import time

bt = bluetooth.BLE()  # singleton
bt.active(True)       # activate BT stack

UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')
UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)
UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,)
UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),)
SERVICES = (UART_SERVICE,)

( (tx, rx,), ) = bt.gatts_register_services(SERVICES)

bt.gap_advertise(100)
50.727273
112
0.716846
0
0
0
0
0
0
0
0
144
0.258065
715562602b941a7d39f1c3b9c3f9ed3ae5bab180
952
py
Python
examples/custom-generator/customer.py
luxbe/sledo
26aa2b59b11ea115afc25bb407602578cb342170
[ "MIT" ]
4
2021-12-13T17:52:52.000Z
2021-12-28T09:40:52.000Z
examples/custom-generator/customer.py
luxbe/sledo
26aa2b59b11ea115afc25bb407602578cb342170
[ "MIT" ]
null
null
null
examples/custom-generator/customer.py
luxbe/sledo
26aa2b59b11ea115afc25bb407602578cb342170
[ "MIT" ]
null
null
null
from random import randint

from sledo.generate.field_generators.base import FieldGenerator

values = ("Austria", "Belgium", "Bulgaria", "Croatia", "Cyprus",
          "Czech Republic", "Denmark", "Estonia", "Finland",
          "France", "Germany", "Greece", "Hungary", "Ireland",
          "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta",
          "Netherlands", "Poland", "Portugal", "Romania",
          "Slovakia", "Slovenia", "Spain", "Sweden",
          "United States", "Japan", "United Kingdom",
          "Bangladesh", "Argentina", "China")

count = len(values) - 1


class CustomerAddressGenerator(FieldGenerator):
    def generate(self, **_):
        return values[randint(0, count)]
21.636364
63
0.456933
117
0.122899
0
0
0
0
0
0
319
0.335084
71567463ea68f026c0c3520620d04799ac10631b
731
py
Python
status-uncertain/baseline_model.py
crawftv/CRAwTO
8c6fdb93ed963cbddfe967b041e8beb578d1e94d
[ "BSD-3-Clause" ]
1
2020-04-03T12:43:27.000Z
2020-04-03T12:43:27.000Z
status-uncertain/baseline_model.py
crawftv/CRAwTO
8c6fdb93ed963cbddfe967b041e8beb578d1e94d
[ "BSD-3-Clause" ]
21
2020-02-14T04:29:03.000Z
2020-07-14T02:19:37.000Z
status-uncertain/baseline_model.py
crawftv/CRAwTO
8c6fdb93ed963cbddfe967b041e8beb578d1e94d
[ "BSD-3-Clause" ]
1
2019-10-25T01:06:58.000Z
2019-10-25T01:06:58.000Z
#!/usr/bin/env python3
from sklearn.metrics import r2_score
import numpy as np


class BaselineModel(object):
    def get_params(self):
        return None

    def predict(self, X):
        return np.ones_like(X.index.values) * self._y_pred

    def score(self, X, y):
        y_true = y
        y_pred = np.ones_like(y_true) * self._y_pred
        return r2_score(y_true, y_pred)


class BaselineClassificationPrediction(BaselineModel):
    def fit(
        self,
        X,
        y,
    ):
        self.y_pred = y.mode()
        return self

    def predict(
        self,
        X,
    ):
        return self.y_pred


class BaselineRegressionPrediction(BaselineModel):
    def fit(self, X, y):
        self._y_pred = y.median()
        return self
20.305556
58
0.621067
643
0.879617
0
0
0
0
0
0
22
0.030096
7157c50528da6262c46158a9ce6e62a7c31b48be
3,229
py
Python
aligner/grow_diag_final.py
ecalder6/MT-HW2
1356aeb374a6e4d0b0ae819684bf314039948c56
[ "MIT" ]
null
null
null
aligner/grow_diag_final.py
ecalder6/MT-HW2
1356aeb374a6e4d0b0ae819684bf314039948c56
[ "MIT" ]
null
null
null
aligner/grow_diag_final.py
ecalder6/MT-HW2
1356aeb374a6e4d0b0ae819684bf314039948c56
[ "MIT" ]
null
null
null
import optparse
import sys


def make_set(data, s, e_vocab, f_vocab, aligned, reverse):
    for pair in data.split():
        cur = pair.split('-')
        if reverse:
            e_vocab.add(int(cur[1]))
            f_vocab.add(int(cur[0]))
            aligned.add(int(cur[0]))
            s.add((int(cur[1]), int(cur[0])))
        else:
            e_vocab.add(int(cur[0]))
            f_vocab.add(int(cur[1]))
            aligned.add(int(cur[0]))
            s.add((int(cur[0]), int(cur[1])))


def grow_diag_final_and(e2f_data, f2e_data):
    directions = [(-1,0),(0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]
    for (i, (e2f, f2e)) in enumerate(zip(open(e2f_data), open(f2e_data))):
        e2f_set, f2e_set, e_vocab, f_vocab, e_aligned, f_aligned = set(), set(), set(), set(), set(), set()
        make_set(e2f, e2f_set, e_vocab, f_vocab, e_aligned, False)
        make_set(f2e, f2e_set, e_vocab, f_vocab, f_aligned, True)

        alignment = e2f_set & f2e_set
        union_alignment = e2f_set | f2e_set
        grow_diag(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, directions)
        final(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, True)

        for e, f in alignment:
            sys.stdout.write("%i-%i " % (e,f))
        sys.stdout.write("\n")


def grow_diag(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, directions):
    prev_len = 0
    while prev_len != len(alignment):
        prev_len = len(alignment)
        for e in e_vocab:
            for f in f_vocab:
                if (e, f) in alignment:
                    for d in directions:
                        en, fn = e + d[0], f + d[1]
                        if (en not in e_alignment or fn not in f_alignment) and (en, fn) in union_alignment:
                            alignment.add((en, fn))
                            e_alignment.add(en)
                            f_alignment.add(fn)


def final(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, final_and):
    for e in e_vocab:
        for f in f_vocab:
            c = False
            if final_and:
                c = e not in e_alignment and f not in f_alignment
            else:
                c = e not in e_alignment or f not in f_alignment
            if c and (e, f) in union_alignment:
                alignment.add((e, f))
                e_alignment.add(e)
                f_alignment.add(f)


def main():
    optparser = optparse.OptionParser()
    optparser.add_option("-d", "--data", dest="train", default="data/alignment", help="Data filename prefix (default=data)")
    optparser.add_option("-e", "--e2f", dest="e2f", default="ef", help="Suffix of English to French filename (default=ef)")
    optparser.add_option("-f", "--f2e", dest="f2e", default="fe", help="Suffix of French to English filename (default=fe)")
    optparser.add_option("-a", "--final_and", dest="final_and", action="store_true", help="Whether to use Final-And version of the algorithm")
    (opts, args) = optparser.parse_args()

    e2f_data = "%s.%s" % (opts.train, opts.e2f)
    f2e_data = "%s.%s" % (opts.train, opts.f2e)

    grow_diag_final_and(e2f_data, f2e_data)


if __name__ == "__main__":
    main()
44.232877
142
0.577888
0
0
0
0
0
0
0
0
344
0.106535
715823dd8a36dcb9c1e16c0545d16a02d319badc
2,567
py
Python
tests/test_tbears_db.py
Transcranial-Solutions/t-bears
4712b8bb425814c444ee75f3220a31df934982aa
[ "Apache-2.0" ]
35
2018-08-24T03:39:35.000Z
2021-08-21T23:35:57.000Z
tests/test_tbears_db.py
Transcranial-Solutions/t-bears
4712b8bb425814c444ee75f3220a31df934982aa
[ "Apache-2.0" ]
40
2018-08-24T05:35:54.000Z
2021-12-15T08:23:38.000Z
tests/test_tbears_db.py
Transcranial-Solutions/t-bears
4712b8bb425814c444ee75f3220a31df934982aa
[ "Apache-2.0" ]
22
2018-08-28T15:11:46.000Z
2021-12-01T23:34:45.000Z
# -*- coding: utf-8 -*-

# Copyright 2017-2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import unittest

from tbears.block_manager.tbears_db import TbearsDB

DIRECTORY_PATH = os.path.abspath((os.path.dirname(__file__)))
DB_PATH = os.path.join(DIRECTORY_PATH, './.tbears_db')


class TestTBearsDB(unittest.TestCase):

    def setUp(self):
        self.TBEARS_DB = TbearsDB(TbearsDB.make_db(DB_PATH))
        self.test_key = b'test_key'
        self.test_value = b'test_value'

    def tearDown(self):
        self.TBEARS_DB.close()
        shutil.rmtree(DB_PATH)

    def test_put_and_get(self):
        # Put and get
        self.TBEARS_DB.put(self.test_key, self.test_value)
        ret = self.TBEARS_DB.get(self.test_key)
        self.assertEqual(ret, self.test_value)

        # overwrite
        overwrite_value = b'test_value_overwrite'
        self.TBEARS_DB.put(self.test_key, overwrite_value)
        ret = self.TBEARS_DB.get(self.test_key)
        self.assertEqual(ret, overwrite_value)

        # get invalid key
        ret = self.TBEARS_DB.get(b'invalid_key')
        self.assertIsNone(ret)

        # put invalid type
        self.assertRaises(TypeError, self.TBEARS_DB.put, 'test_key', self.test_value)
        self.assertRaises(TypeError, self.TBEARS_DB.put, self.test_key, 123)

    def test_delete(self):
        self.TBEARS_DB.put(self.test_key, self.test_value)
        ret = self.TBEARS_DB.get(self.test_key)
        self.assertEqual(ret, self.test_value)
        self.TBEARS_DB.delete(self.test_key)
        ret = self.TBEARS_DB.get(self.test_key)
        self.assertIsNone(ret)

    def test_iterator(self):
        self.TBEARS_DB.put(b'key1', b'value1')
        self.TBEARS_DB.put(b'key2', b'value2')
        self.TBEARS_DB.put(b'key3', b'value3')
        self.TBEARS_DB.put(b'key4', b'value4')
        i = 1
        for _, actual_value in self.TBEARS_DB.iterator():
            expected_value = ('value' + str(i)).encode()
            self.assertEqual(expected_value, actual_value)
            i += 1
33.776316
85
0.679782
1,748
0.680951
0
0
0
0
0
0
805
0.313596
715a02ff047054f60c24cd7d80d0ef426229bc1b
1,658
py
Python
src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py
pierky/exabgp
34be537ae5906c0830b31da1152ae63108ccf911
[ "BSD-3-Clause" ]
1,560
2015-01-01T08:53:05.000Z
2022-03-29T20:22:43.000Z
src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py
pierky/exabgp
34be537ae5906c0830b31da1152ae63108ccf911
[ "BSD-3-Clause" ]
818
2015-01-01T17:38:40.000Z
2022-03-30T07:29:24.000Z
src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py
pierky/exabgp
34be537ae5906c0830b31da1152ae63108ccf911
[ "BSD-3-Clause" ]
439
2015-01-06T21:20:41.000Z
2022-03-19T23:24:25.000Z
# encoding: utf-8
"""
mplsmask.py

Created by Evelio Vila on 2016-12-01.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""

from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS

#      0                   1                   2                   3
#      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#     |              Type             |             Length            |
#     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#     |L|R|  Reserved |
#     +-+-+-+-+-+-+-+-+
#     https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask
#
#   +------------+------------------------------------------+-----------+
#   |    Bit     | Description                              | Reference |
#   +------------+------------------------------------------+-----------+
#   |    'L'     | Label Distribution Protocol (LDP)        | [RFC5036] |
#   |    'R'     | Extension to RSVP for LSP Tunnels        | [RFC3209] |
#   |            | (RSVP-TE)                                |           |
#   | 'Reserved' | Reserved for future use                  |           |
#   +------------+------------------------------------------+-----------+
#   RFC 7752 3.3.2.2.  MPLS Protocol Mask TLV


@LinkState.register()
class MplsMask(FlagLS):
    REPR = 'MPLS Protocol mask'
    JSON = 'mpls-mask'
    TLV = 1094
    FLAGS = ['LDP', 'RSVP-TE', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV']
    LEN = 1
41.45
77
0.390229
178
0.107358
0
0
200
0.120627
0
0
1,312
0.791315
715d6a83862066d08f507e36bb0ef91281fb5c5f
4,977
py
Python
tests/test_cecum.py
hsorby/scaffoldmaker
5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1
[ "Apache-2.0" ]
null
null
null
tests/test_cecum.py
hsorby/scaffoldmaker
5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1
[ "Apache-2.0" ]
38
2018-04-04T10:40:26.000Z
2022-03-14T22:02:26.000Z
tests/test_cecum.py
hsorby/scaffoldmaker
5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1
[ "Apache-2.0" ]
28
2018-03-11T19:31:35.000Z
2022-02-03T23:14:21.000Z
import unittest

from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.context import Context
from opencmiss.zinc.element import Element
from opencmiss.zinc.field import Field
from opencmiss.zinc.result import RESULT_OK
from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1
from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace

from testutils import assertAlmostEqualList


class CecumScaffoldTestCase(unittest.TestCase):

    def test_cecum1(self):
        """
        Test creation of cecum scaffold.
        """
        parameterSetNames = MeshType_3d_cecum1.getParameterSetNames()
        self.assertEqual(parameterSetNames, ["Default", "Pig 1"])
        options = MeshType_3d_cecum1.getDefaultOptions("Pig 1")
        self.assertEqual(30, len(options))
        self.assertEqual(5, options.get("Number of segments"))
        self.assertEqual(2, options.get("Number of elements around tenia coli"))
        self.assertEqual(8, options.get("Number of elements along segment"))
        self.assertEqual(1, options.get("Number of elements through wall"))
        self.assertEqual(35.0, options.get("Start inner radius"))
        self.assertEqual(3.0, options.get("Start inner radius derivative"))
        self.assertEqual(38.0, options.get("End inner radius"))
        self.assertEqual(3.0, options.get("End inner radius derivative"))
        self.assertEqual(0.5, options.get("Corner inner radius factor"))
        self.assertEqual(0.25, options.get("Haustrum inner radius factor"))
        self.assertEqual(4.0, options.get("Segment length mid derivative factor"))
        self.assertEqual(3, options.get("Number of tenia coli"))
        self.assertEqual(5.0, options.get("Start tenia coli width"))
        self.assertEqual(0.0, options.get("End tenia coli width derivative"))
        self.assertEqual(2.0, options.get("Wall thickness"))
        ostiumOptions = options['Ileocecal junction']
        ostiumSettings = ostiumOptions.getScaffoldSettings()
        self.assertEqual(1, ostiumSettings.get("Number of vessels"))
        self.assertEqual(8, ostiumSettings.get("Number of elements around ostium"))
        self.assertEqual(1, ostiumSettings.get("Number of elements through wall"))
        self.assertEqual(20.0, ostiumSettings.get("Ostium diameter"))
        self.assertEqual(10.0, ostiumSettings.get("Vessel inner diameter"))
        self.assertEqual(60, options.get("Ileocecal junction angular position degrees"))
        self.assertEqual(0.5, options.get("Ileocecal junction position along factor"))

        context = Context("Test")
        region = context.getDefaultRegion()
        self.assertTrue(region.isValid())
        annotationGroups = MeshType_3d_cecum1.generateBaseMesh(region, options)
        self.assertEqual(2, len(annotationGroups))

        fieldmodule = region.getFieldmodule()
        self.assertEqual(RESULT_OK, fieldmodule.defineAllFaces())
        mesh3d = fieldmodule.findMeshByDimension(3)
        self.assertEqual(1492, mesh3d.getSize())
        mesh2d = fieldmodule.findMeshByDimension(2)
        self.assertEqual(5617, mesh2d.getSize())
        mesh1d = fieldmodule.findMeshByDimension(1)
        self.assertEqual(6767, mesh1d.getSize())
        nodes = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
        self.assertEqual(2642, nodes.getSize())
        datapoints = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_DATAPOINTS)
        self.assertEqual(0, datapoints.getSize())

        coordinates = fieldmodule.findFieldByName("coordinates").castFiniteElement()
        self.assertTrue(coordinates.isValid())
        minimums, maximums = evaluateFieldNodesetRange(coordinates, nodes)
        assertAlmostEqualList(self, minimums, [-49.01658984455258, -46.89686037622053, -2.343256155753525], 1.0E-6)
        assertAlmostEqualList(self, maximums, [42.18085849205387, 54.89264119402881, 180.0], 1.0E-6)

        with ChangeManager(fieldmodule):
            one = fieldmodule.createFieldConstant(1.0)
            faceMeshGroup = createFaceMeshGroupExteriorOnFace(fieldmodule, Element.FACE_TYPE_XI3_1)
            surfaceAreaField = fieldmodule.createFieldMeshIntegral(one, coordinates, faceMeshGroup)
            surfaceAreaField.setNumbersOfPoints(4)
            volumeField = fieldmodule.createFieldMeshIntegral(one, coordinates, mesh3d)
            volumeField.setNumbersOfPoints(3)
        fieldcache = fieldmodule.createFieldcache()
        result, surfaceArea = surfaceAreaField.evaluateReal(fieldcache, 1)
        self.assertEqual(result, RESULT_OK)
        self.assertAlmostEqual(surfaceArea, 65960.20655074248, delta=1.0E-6)
        result, volume = volumeField.evaluateReal(fieldcache, 1)
        self.assertEqual(result, RESULT_OK)
        self.assertAlmostEqual(volume, 127905.28250502056, delta=1.0E-6)


if __name__ == "__main__":
    unittest.main()
53.516129
115
0.723327
4,415
0.887081
0
0
0
0
0
0
755
0.151698
715db019834eea3cecfac08bf5fe333bb00487eb
3,658
py
Python
samples/destroy_vm.py
jm66/pyvmomi-community-samples
5ca4a50b767500e07b9bce9fba70240bfa963a4e
[ "Apache-2.0" ]
4
2016-01-04T06:19:56.000Z
2018-09-09T01:03:07.000Z
samples/destroy_vm.py
zhangjiahaol/pyvmomi-community-samples
905ec34edfbd151531832e98b6a0748fa6ff5e0e
[ "Apache-2.0" ]
12
2019-04-17T02:47:25.000Z
2021-04-02T09:15:37.000Z
samples/destroy_vm.py
zhangjiahaol/pyvmomi-community-samples
905ec34edfbd151531832e98b6a0748fa6ff5e0e
[ "Apache-2.0" ]
15
2018-04-26T05:18:12.000Z
2021-11-06T04:44:58.000Z
#!/usr/bin/env python
# Copyright 2015 Michael Rice <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import atexit

from pyVim import connect
from pyVmomi import vim

from tools import cli
from tools import tasks


def setup_args():
    """Adds additional ARGS to allow the vm name or uuid to be set.
    """
    parser = cli.build_arg_parser()
    # using j here because -u is used for user
    parser.add_argument('-j', '--uuid',
                        help='BIOS UUID of the VirtualMachine you want '
                             'to destroy.')
    parser.add_argument('-n', '--name',
                        help='DNS Name of the VirtualMachine you want to '
                             'destroy.')
    parser.add_argument('-i', '--ip',
                        help='IP Address of the VirtualMachine you want to '
                             'destroy')
    parser.add_argument('-v', '--vm',
                        help='VM name of the VirtualMachine you want '
                             'to destroy.')
    my_args = parser.parse_args()
    return cli.prompt_for_password(my_args)


def get_obj(content, vimtype, name):
    """Create contrainer view and search for object in it"""
    obj = None
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    for c in container.view:
        if name:
            if c.name == name:
                obj = c
                break
        else:
            obj = c
            break
    container.Destroy()
    return obj


ARGS = setup_args()
SI = None
try:
    SI = connect.SmartConnectNoSSL(host=ARGS.host,
                                   user=ARGS.user,
                                   pwd=ARGS.password,
                                   port=ARGS.port)
    atexit.register(connect.Disconnect, SI)
except (IOError, vim.fault.InvalidLogin):
    pass

if not SI:
    raise SystemExit("Unable to connect to host with supplied credentials.")

VM = None
if ARGS.vm:
    VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm)
elif ARGS.uuid:
    VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid, True, False)
elif ARGS.name:
    VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name, True)
elif ARGS.ip:
    VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)

if VM is None:
    raise SystemExit(
        "Unable to locate VirtualMachine. Arguments given: "
        "vm - {0} , uuid - {1} , name - {2} , ip - {3}"
        .format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip)
    )

print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
if format(VM.runtime.powerState) == "poweredOn":
    print("Attempting to power off {0}".format(VM.name))
    TASK = VM.PowerOffVM_Task()
    tasks.wait_for_tasks(SI, [TASK])
    print("{0}".format(TASK.info.state))

print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
31.264957
76
0.594587
0
0
0
0
0
0
0
0
1,344
0.367414
715e64156e2717f5d7270f3da98702a6b8223554
253
py
Python
helpers/Screen.py
1000monkeys/MastermindRedux
6b07a341ecbf2ea325949a49c84218cc3632cd33
[ "Unlicense" ]
null
null
null
helpers/Screen.py
1000monkeys/MastermindRedux
6b07a341ecbf2ea325949a49c84218cc3632cd33
[ "Unlicense" ]
null
null
null
helpers/Screen.py
1000monkeys/MastermindRedux
6b07a341ecbf2ea325949a49c84218cc3632cd33
[ "Unlicense" ]
null
null
null
import sys


class Screen:
    def __init__(self) -> None:
        pass

    def handle_events(self, events):
        for event in events:
            if event.type == self.pygame.QUIT:
                sys.exit()

    def draw(self, screen):
        pass
19.461538
46
0.545455
241
0.952569
0
0
0
0
0
0
0
0
71602e883fba7821b66ac710b8b6c9c76a964d73
5,193
py
Python
VirtualStage/BackgroundMatting/fixed_threshold.py
chris-han/ailab
b77d90f9089fa8003095843aa5de718fe73965a7
[ "MIT" ]
null
null
null
VirtualStage/BackgroundMatting/fixed_threshold.py
chris-han/ailab
b77d90f9089fa8003095843aa5de718fe73965a7
[ "MIT" ]
null
null
null
VirtualStage/BackgroundMatting/fixed_threshold.py
chris-han/ailab
b77d90f9089fa8003095843aa5de718fe73965a7
[ "MIT" ]
null
null
null
import os def fixed_split(videos, thresholds, mask_suffix, overlap=0, background_path="/"): # crop target background video frames backgrounds = [os.path.join(background_path, f[:-4]) for f in os.listdir(background_path) if f.endswith(".mp4")] print(f"Splitting {len(backgrounds)} target background videos vertically by a fixed threshold") for i, background in enumerate(backgrounds): if i >= (len(thresholds)) or not thresholds[i]: continue try: os.makedirs(background + "_up") os.makedirs(background + "_dw") except FileExistsError: continue threshold = int(thresholds[i]) iup_region = f"iw:{threshold + overlap}:0:0" idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}" cmd=( f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" " f'-filter:v "crop={iup_region}" ' f"\"{os.path.join(background+'_up', '%04d_img.png')}\"" " > split_background_logs.txt 2>&1" ) code = os.system( cmd ) if code != 0: exit(code) code = os.system( f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" " f'-filter:v "crop={idw_region}" ' f"\"{os.path.join(background+'_dw', '%04d_img.png')}\"" " > split_background_logs.txt 2>&1" ) if code != 0: exit(code) print(f"Splitting {len(videos)} videos vertically by a fixed threshold") for i, video in enumerate(videos): if i >= (len(thresholds)) or not thresholds[i]: continue try: os.makedirs(video + "_up") os.makedirs(video + "_dw") except FileExistsError: continue threshold = int(thresholds[i]) iup_region = f"iw:{threshold + overlap}:0:0" idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}" # crop target background single image cmd = ( f"ffmpeg -y -i \"{video+'.png'}\" " f'-filter:v \"crop={iup_region}\" ' f"\"{video+'_up.png'}\"" " > split_logs.txt 2>&1" ) code = os.system( cmd ) if code != 0: exit(code) code = os.system( f"ffmpeg -y -i \"{video+'.png'}\" " f'-filter:v "crop={idw_region}" ' f"\"{video+'_dw.png'}\"" " > split_logs.txt 2>&1" ) if code != 0: exit(code) # crop color images cmd=( f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" " f'-filter:v "crop={iup_region}" ' f"\"{os.path.join(video+'_up', '%04d_img.png')}\"" " > split_logs.txt 2>&1" ) code = os.system( cmd ) if code != 0: exit(code) code = os.system( f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" " f'-filter:v "crop={idw_region}" ' f"\"{os.path.join(video+'_dw', '%04d_img.png')}\"" " > split_logs.txt 2>&1" ) if code != 0: exit(code) # crop mask images code = os.system( f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" " f'-filter:v "crop={iup_region}" ' f"\"{os.path.join(video+'_up', '%04d')}{mask_suffix}.png\"" " > split_logs.txt 2>&1" ) if code != 0: exit(code) code = os.system( f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" " f'-filter:v "crop={idw_region}" ' f"\"{os.path.join(video+'_dw', '%04d')}{mask_suffix}.png\"" " > split_logs.txt 2>&1" ) if code != 0: exit(code) print(f" Splitted {video} ({i+1}/{len(videos)})") def fixed_merge(videos, factors, output_dir, suffix, outputs_list, overlap=0): print(f"Reconstructing {len(videos)} output images") for i, video in enumerate(videos): if i < (len(factors)) and factors[i]: # video split, merging out_path = os.path.join(output_dir, os.path.basename(video)).replace( "\\", "/" ) try: os.makedirs(out_path + suffix) except FileExistsError: continue outpup = (out_path + "_up" + suffix).replace("\\", "/") outpdw = (out_path + "_dw" + suffix).replace("\\", "/") for o in outputs_list: code = os.system( f"ffmpeg -i \"{outpup}/%04d_{o}.png\" -i \"{outpdw}/%04d_{o}.png\" " f'-filter_complex 
"[0:0]crop=iw:ih-{overlap}:0:0[v0];' f"[1:0]crop=iw:ih-{overlap}:0:{overlap}[v1];" f'[v0][v1]vstack" ' f"\"{out_path + suffix}/%04d_{o}.png\" -hide_banner" " > merge_logs.txt" ) if code != 0: exit(code) print(f" Merged {video} ({i+1}/{len(videos)})")
33.720779
116
0.48238
0
0
0
0
0
0
0
0
2,184
0.420566
7160d131d6077709c38251321b7619b34bcdeab7
7,041
py
Python
hn2016_falwa/utilities.py
veredsil/hn2016_falwa
53035ac838860dd8a8d85619f16cc9785dee8655
[ "MIT" ]
null
null
null
hn2016_falwa/utilities.py
veredsil/hn2016_falwa
53035ac838860dd8a8d85619f16cc9785dee8655
[ "MIT" ]
null
null
null
hn2016_falwa/utilities.py
veredsil/hn2016_falwa
53035ac838860dd8a8d85619f16cc9785dee8655
[ "MIT" ]
null
null
null
import numpy as np from math import pi,exp def static_stability(height,area,theta,s_et=None,n_et=None): """ The function "static_stability" computes the vertical gradient (z-derivative) of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def- inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing. At the boundary, the static stability is estimated by forward/backward differen- cing involving two adjacent z-grid points: i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0]) stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1]) Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues Parameters ---------- height : sequence or array_like Array of z-coordinate [in meters] with dimension = (kmax), equally spaced area : ndarray Two-dimension numpy array specifying differential areal element of each grid point; dimension = (nlat, nlon). theta : ndarray Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat) s_et : int, optional Index of the latitude that defines the boundary of the Southern hemispheric domain; initialized as nlat/2 if not input n_et : int, optional Index of the latitude that defines the boundary of the Southern hemispheric domain; initialized as nlat/2 if not input Returns ------- t0_n : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Northern hemispheric domain with dimension = (kmax) t0_s : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Southern hemispheric domain with dimension = (kmax) stat_n : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric domain with dimension = (kmax) stat_s : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric domain with dimension = (kmax) """ nlat = theta.shape[1] if s_et==None: s_et = nlat//2 if n_et==None: n_et = nlat//2 stat_n = np.zeros(theta.shape[0]) stat_s = np.zeros(theta.shape[0]) if theta.ndim==3: zonal_mean = np.mean(theta,axis=-1) elif theta.ndim==2: zonal_mean = theta if area.ndim==2: area_zonal_mean = np.mean(area,axis=-1) elif area.ndim==1: area_zonal_mean = area csm_n_et = np.sum(area_zonal_mean[-n_et:]) csm_s_et = np.sum(area_zonal_mean[:s_et]) t0_n = np.sum(zonal_mean[:,-n_et:]*area_zonal_mean[np.newaxis,-n_et:],axis=-1)/csm_n_et t0_s = np.sum(zonal_mean[:,:s_et]*area_zonal_mean[np.newaxis,:s_et],axis=-1)/csm_s_et stat_n[1:-1] = (t0_n[2:]-t0_n[:-2])/(height[2:]-height[:-2]) stat_s[1:-1] = (t0_s[2:]-t0_s[:-2])/(height[2:]-height[:-2]) stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0]) stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1]) stat_s[0] = (t0_s[1]-t0_s[0])/(height[1]-height[0]) stat_s[-1] = (t0_s[-2]-t0_s[-1])/(height[-2]-height[-1]) return t0_n,t0_s,stat_n,stat_s def compute_qgpv_givenvort(omega,nlat,nlon,kmax,unih,ylat,avort,potential_temp, t0_cn,t0_cs,stat_cn,stat_cs,nlat_s=None,scale_height=7000.): """ The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential vorticity based on the absolute vorticity, potential temperature and static stability given. Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues Parameters ---------- omega : float, optional Rotation rate of the planet. nlat : int Latitudinal dimension of the latitude grid. nlon : int Longitudinal dimension of the longitude grid. 
kmax : int Vertical dimension of the height grid. unih : sequence or array_like Numpy array of height in [meters]; dimension = (kmax) ylat : sequence or array_like Numpy array of latitudes in [degrees]; dimension = (nlat) avort : ndarray Three-dimension numpy array of absolute vorticity (i.e. relative vorticity + 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon) potential_temp : ndarray Three-dimension numpy array of potential temperature in [K]; dimension = (kmax x nlat x nlon) t0_cn : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Northern hemispheric domain with dimension = (kmax) t0_cs : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Southern hemispheric domain with dimension = (kmax) stat_cn : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric domain with dimension = (kmax) stat_cs : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric domain with dimension = (kmax) scale_height : float Scale height of the atmosphere in [m] with default value 7000. Returns ------- QGPV : ndarray Three-dimension numpy array of quasi-geostrophic potential vorticity; dimension = (kmax x nlat x nlon) dzdiv : ndarray Three-dimension numpy array of the stretching term in QGPV; dimension = (kmax x nlat x nlon) """ if nlat_s==None: nlat_s=nlat//2 clat = np.cos(ylat*pi/180.) clat = np.abs(clat) # Just to avoid the negative value at poles # --- Next, calculate PV --- av2 = np.empty_like(potential_temp) # dv/d(lon) av3 = np.empty_like(potential_temp) # du/d(lat) qgpv = np.empty_like(potential_temp) # av1+av2+av3+dzdiv av1 = np.ones((kmax,nlat,nlon)) * 2*omega*np.sin(ylat[np.newaxis,:,np.newaxis]*pi/180.) # Calculate the z-divergence term zdiv = np.empty_like(potential_temp) dzdiv = np.empty_like(potential_temp) for kk in range(kmax): # This is more efficient zdiv[kk,:nlat_s,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,:nlat_s,:]-t0_cs[kk])/stat_cs[kk] zdiv[kk,-nlat_s:,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,-nlat_s:,:]-t0_cn[kk])/stat_cn[kk] dzdiv[1:kmax-1,:,:] = np.exp(unih[1:kmax-1,np.newaxis,np.newaxis]/scale_height)* \ (zdiv[2:kmax,:,:]-zdiv[0:kmax-2,:,:]) \ /(unih[2:kmax,np.newaxis,np.newaxis]-unih[0:kmax-2,np.newaxis,np.newaxis]) dzdiv[0,:,:] = exp(unih[0]/scale_height)*(zdiv[1,:,:]-zdiv[0,:,:])/ \ (unih[1,np.newaxis,np.newaxis]-unih[0,np.newaxis,np.newaxis]) dzdiv[kmax-1,:,:] = exp(unih[kmax-1]/scale_height)*(zdiv[kmax-1,:,:]-zdiv[kmax-2,:,:])/ \ (unih[kmax-1,np.newaxis,np.newaxis]-unih[kmax-2,np.newaxis,np.newaxis]) qgpv = avort+dzdiv * av1 return qgpv, dzdiv
40.234286
111
0.656441
0
0
0
0
0
0
0
0
4,408
0.626047
7160dc5984a5a68781b1f9dc71bfe52a6ee535f4
12,570
py
Python
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py
JennyLawrance/azure-cli
cb9ca4b694110806b31803a95f9f315b2fde6410
[ "MIT" ]
null
null
null
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py
JennyLawrance/azure-cli
cb9ca4b694110806b31803a95f9f315b2fde6410
[ "MIT" ]
null
null
null
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py
JennyLawrance/azure-cli
cb9ca4b694110806b31803a95f9f315b2fde6410
[ "MIT" ]
null
null
null
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from argcomplete.completers import FilesCompleter from knack.arguments import CLIArgumentType from azure.cli.core.commands.parameters import (get_location_type, file_type, get_resource_name_completion_list, get_enum_type, get_three_state_flag) from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku, AllocationPolicy, AccessRightsDescription) from .custom import KeyType, SimpleAccessRights from ._validators import validate_policy_permissions from ._completers import get_device_id_completion_list hub_name_type = CLIArgumentType( completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'), help='IoT Hub name.') dps_name_type = CLIArgumentType( options_list=['--dps-name'], completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'), help='IoT Provisioning Service name') def load_arguments(self, _): # pylint: disable=too-many-statements # Arguments for IoT DPS with self.argument_context('iot dps') as c: c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name') with self.argument_context('iot dps create') as c: c.argument('location', get_location_type(self.cli_ctx), help='Location of your IoT Provisioning Service. Default is the location of target resource group.') c.argument('sku', arg_type=get_enum_type(IotDpsSku), help='Pricing tier for the IoT provisioning service.') c.argument('unit', help='Units in your IoT Provisioning Service.', type=int) for subgroup in ['access-policy', 'linked-hub', 'certificate']: with self.argument_context('iot dps {}'.format(subgroup)) as c: c.argument('dps_name', options_list=['--dps-name'], id_part=None) with self.argument_context('iot dps access-policy') as c: c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'], help='A friendly name for DPS access policy.') with self.argument_context('iot dps access-policy create') as c: c.argument('rights', options_list=['--rights', '-r'], nargs='+', arg_type=get_enum_type(AccessRightsDescription), help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.') c.argument('primary_key', help='Primary SAS key value.') c.argument('secondary_key', help='Secondary SAS key value.') with self.argument_context('iot dps access-policy update') as c: c.argument('rights', options_list=['--rights', '-r'], nargs='+', arg_type=get_enum_type(AccessRightsDescription), help='Access rights for the IoT provisioning service. 
Use space-separated list for multiple rights.') c.argument('primary_key', help='Primary SAS key value.') c.argument('secondary_key', help='Secondary SAS key value.') with self.argument_context('iot dps linked-hub') as c: c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.') with self.argument_context('iot dps linked-hub create') as c: c.argument('connection_string', help='Connection string of the IoT hub.') c.argument('location', get_location_type(self.cli_ctx), help='Location of the IoT hub.') c.argument('apply_allocation_policy', help='A boolean indicating whether to apply allocation policy to the IoT hub.', arg_type=get_three_state_flag()) c.argument('allocation_weight', help='Allocation weight of the IoT hub.') with self.argument_context('iot dps linked-hub update') as c: c.argument('apply_allocation_policy', help='A boolean indicating whether to apply allocation policy to the Iot hub.', arg_type=get_three_state_flag()) c.argument('allocation_weight', help='Allocation weight of the IoT hub.') with self.argument_context('iot dps allocation-policy update') as c: c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy), help='Allocation policy for the IoT provisioning service.') with self.argument_context('iot dps certificate') as c: c.argument('certificate_path', options_list=['--path', '-p'], type=file_type, completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.') c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'], help='A friendly name for the certificate.') c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.') # Arguments for IoT Hub with self.argument_context('iot') as c: c.argument('device_id', options_list=['--device-id', '-d'], help='Device Id.', completer=get_device_id_completion_list) with self.argument_context('iot hub') as c: c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name') c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.') for subgroup in ['consumer-group', 'policy', 'job', 'certificate']: with self.argument_context('iot hub {}'.format(subgroup)) as c: c.argument('hub_name', options_list=['--hub-name']) with self.argument_context('iot device') as c: c.argument('hub_name', hub_name_type) with self.argument_context('iot hub certificate') as c: c.argument('certificate_path', options_list=['--path', '-p'], type=file_type, completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.') c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.') with self.argument_context('iot hub consumer-group') as c: c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2', help='Event hub consumer group name.') c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.') with self.argument_context('iot hub policy') as c: c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1', help='Shared access policy name.') permission_values = ', '.join([x.value for x in SimpleAccessRights]) c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower, help='Permissions of shared access policy. Use space-separated list for multiple permissions. 
' 'Possible values: {}'.format(permission_values)) with self.argument_context('iot hub job') as c: c.argument('job_id', id_part='child_name_1', help='Job Id.') with self.argument_context('iot hub create') as c: c.argument('hub_name', completer=None) c.argument('location', get_location_type(self.cli_ctx), help='Location of your IoT Hub. Default is the location of target resource group.') c.argument('sku', arg_type=get_enum_type(IotHubSku), help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. ' 'Note that only one free IoT hub instance is allowed in each ' 'subscription. Exception will be thrown if free instances exceed one.') c.argument('unit', help='Units in your IoT Hub.', type=int) c.argument('partition_count', help='The number of partitions for device-to-cloud messages.', type=int) with self.argument_context('iot hub show-connection-string') as c: c.argument('policy_name', help='Shared access policy to use.') c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.') with self.argument_context('iot device create') as c: c.argument('device_id', completer=None) with self.argument_context('iot device create', arg_group='X.509 Certificate') as c: c.argument('x509', action='store_true', help='Use X.509 certificate for device authentication.') c.argument('primary_thumbprint', help='Primary X.509 certificate thumbprint to authenticate device.') c.argument('secondary_thumbprint', help='Secondary X.509 certificate thumbprint to authenticate device.') c.argument('valid_days', type=int, help='Number of days the generated self-signed X.509 certificate should be ' 'valid for. Default validity is 365 days.') c.argument('output_dir', help='Output directory for generated self-signed X.509 certificate. ' 'Default is current working directory.') with self.argument_context('iot device list') as c: c.argument('top', help='Maximum number of device identities to return.', type=int) with self.argument_context('iot device delete') as c: c.argument('etag', help='ETag of the target device. It is used for the purpose of optimistic ' 'concurrency. Delete operation will be performed only if the specified ' 'ETag matches the value maintained by the server, indicating that the ' 'device identity has not been modified since it was retrieved. Default ' 'value is set to wildcard character (*) to force an unconditional ' 'delete.') with self.argument_context('iot device show-connection-string') as c: c.argument('top', type=int, help='Maximum number of connection strings to return.') c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.') with self.argument_context('iot device message') as c: c.argument('lock_token', help='Message lock token.') with self.argument_context('iot device message send', arg_group='Messaging') as c: c.argument('data', help='Device-to-cloud message body.') c.argument('message_id', help='Device-to-cloud message Id.') c.argument('correlation_id', help='Device-to-cloud message correlation Id.') c.argument('user_id', help='Device-to-cloud message user Id.') with self.argument_context('iot device message receive') as c: c.argument('lock_timeout', type=int, help='In case a message returned to this call, this specifies the amount of ' 'time in seconds, the message will be invisible to other receive calls.') with self.argument_context('iot device export') as c: c.argument('blob_container_uri', help='Blob Shared Access Signature URI with write access to a blob container.' 
'This is used to output the status of the job and the results.') c.argument('include_keys', action='store_true', help='If set, keys are exported normally. Otherwise, keys are set to null in ' 'export output.') with self.argument_context('iot device import') as c: c.argument('input_blob_container_uri', help='Blob Shared Access Signature URI with read access to a blob container.' 'This blob contains the operations to be performed on the identity ' 'registry ') c.argument('output_blob_container_uri', help='Blob Shared Access Signature URI with write access to a blob container.' 'This is used to output the status of the job and the results.')
61.019417
120
0.631344
0
0
0
0
0
0
0
0
5,937
0.472315
7160eb99604d70299eb40716235e949ffc576a16
3,280
py
Python
metrics-calculator/tests/integration/test_s3.py
nhsconnect/prm-practice-migration-dashboard
40c8760f409834d05bde4fb015aa5f8765acaa82
[ "0BSD" ]
null
null
null
metrics-calculator/tests/integration/test_s3.py
nhsconnect/prm-practice-migration-dashboard
40c8760f409834d05bde4fb015aa5f8765acaa82
[ "0BSD" ]
null
null
null
metrics-calculator/tests/integration/test_s3.py
nhsconnect/prm-practice-migration-dashboard
40c8760f409834d05bde4fb015aa5f8765acaa82
[ "0BSD" ]
null
null
null
import boto3
import gzip
from moto import mock_s3
import pytest
import os

from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist
from tests.builders.file import build_gzip_csv


@pytest.fixture(scope='function')
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
    os.environ['AWS_SECURITY_TOKEN'] = 'testing'
    os.environ['AWS_SESSION_TOKEN'] = 'testing'
    os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'


@pytest.fixture(scope='function')
def s3(aws_credentials):
    with mock_s3():
        yield boto3.resource('s3', region_name='us-east-1')


@mock_s3
def test_read_object_s3_returns_object_content(s3):
    bucket = s3.create_bucket(Bucket="test_bucket")
    s3_object = bucket.Object("test_object.csv.gz")
    gzipped_content = build_gzip_csv(
        header=["id", "message", "comment"],
        rows=[["123", "A message", "A comment"], [
            "321", "Another message", "Another comment"]],
    )
    s3_object.put(
        Body=gzipped_content
    )

    expected = "id,message,comment\n123,A message,A comment\n321,Another message,Another comment"

    csv_stream = read_object_s3(s3, "s3://test_bucket/test_object.csv.gz")
    with gzip.open(csv_stream, mode="rt") as f:
        actual = f.read()

    assert actual == expected


@mock_s3
def test_write_object_s3_writes_object_content(s3):
    s3.create_bucket(Bucket="test_bucket")
    json_string = b'{"fruit": "mango"}'

    write_object_s3(s3, "s3://test_bucket/test_object.json", json_string)

    s3_object_response = s3.Object("test_bucket", "test_object.json").get()
    assert s3_object_response["Body"].read() == json_string


@mock_s3
def test_write_object_s3_writes_object_content_with_metadata(s3):
    s3.create_bucket(Bucket="test_bucket")
    json_string = b'{"fruit": "mango"}'
    metadata = {
        "start_date": "start-date",
        "end_date": "end-date"
    }

    write_object_s3(s3, "s3://test_bucket/test_object.json", json_string, metadata)

    s3_object_response = s3.Object("test_bucket", "test_object.json").get()
    assert s3_object_response["Metadata"] == metadata


@mock_s3
def test_objects_exist_returns_true_when_all_objects_exist(s3):
    s3.create_bucket(Bucket="test_bucket")
    object_one = "object-one"
    object_two = "object-two"
    write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
    write_object_s3(s3, f"s3://test_bucket/{object_two}", 'object-two-content')

    result = objects_exist(s3, "test_bucket", [object_one, object_two])

    assert result


@mock_s3
def test_objects_exist_returns_false_when_only_one_object_exists(s3):
    s3.create_bucket(Bucket="test_bucket")
    object_one = "object-one"
    object_two = "object-two"
    write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')

    result = objects_exist(s3, "test_bucket", [object_one, object_two])

    assert not result


@mock_s3
def test_objects_exist_returns_false_when_no_objects_exist(s3):
    s3.create_bucket(Bucket="test_bucket")
    object_one = "object-one"
    object_two = "object-two"

    result = objects_exist(s3, "test_bucket", [object_one, object_two])

    assert not result
28.521739
97
0.710366
0
0
104
0.031707
3,061
0.933232
0
0
1,031
0.314329
716145a9d2a82e68a98031ac79781824db56e9c8
13,528
py
Python
image_analogy/losses/patch_matcher.py
kaldap/image-analogies
0867aedfae7dfc0d27c42805a3d07f7b9eb7eaa2
[ "MIT" ]
3,722
2016-02-28T18:03:51.000Z
2022-03-29T18:03:30.000Z
image_analogy/losses/patch_matcher.py
germanmad/image-analogies
066626149ccb96b0a0488ca7ea4fc992aa62b727
[ "MIT" ]
58
2016-02-28T03:23:43.000Z
2022-03-11T23:14:08.000Z
image_analogy/losses/patch_matcher.py
germanmad/image-analogies
066626149ccb96b0a0488ca7ea4fc992aa62b727
[ "MIT" ]
351
2016-03-05T03:22:48.000Z
2022-03-01T09:06:33.000Z
import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d


def _calc_patch_grid_dims(shape, patch_size, patch_stride):
    x_w, x_h, x_c = shape
    num_rows = 1 + (x_h - patch_size) // patch_stride
    num_cols = 1 + (x_w - patch_size) // patch_stride
    return num_rows, num_cols


def make_patch_grid(x, patch_size, patch_stride=1):
    '''x shape: (num_channels, rows, cols)'''
    x = x.transpose(2, 1, 0)
    patches = extract_patches_2d(x, (patch_size, patch_size))
    x_w, x_h, x_c = x.shape
    num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
    patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
    patches = patches.transpose((0, 1, 4, 2, 3))
    #patches = np.rollaxis(patches, -1, 2)
    return patches


def combine_patches_grid(in_patches, out_shape):
    '''Reconstruct an image from these `patches`

    input shape: (rows, cols, channels, patch_row, patch_col)
    '''
    num_rows, num_cols = in_patches.shape[:2]
    num_channels = in_patches.shape[-3]
    patch_size = in_patches.shape[-1]
    num_patches = num_rows * num_cols
    in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size))  # (patches, channels, pr, pc)
    in_patches = np.transpose(in_patches, (0, 2, 3, 1))  # (patches, p, p, channels)
    recon = reconstruct_from_patches_2d(in_patches, out_shape)
    return recon.transpose(2, 1, 0).astype(np.float32)


class PatchMatcher(object):
    '''A matcher of image patches inspired by the PatchMatch algorithm.

    image shape: (width, height, channels)
    '''
    def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
            num_propagation_steps=5, num_random_steps=5,
            random_max_radius=1.0, random_scale=0.5):
        self.input_shape = input_shape
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.jump_size = jump_size
        self.num_propagation_steps = num_propagation_steps
        self.num_random_steps = num_random_steps
        self.random_max_radius = random_max_radius
        self.random_scale = random_scale
        self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
        self.target_patches = make_patch_grid(target_img, patch_size)
        self.target_patches_normed = self.normalize_patches(self.target_patches)
        self.coords = np.random.uniform(0.0, 1.0,  # TODO: switch to pixels
            (2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
        self.similarity = np.zeros(input_shape[:2:-1], dtype=np.float32)
        self.min_propagration_row = 1.0 / self.num_input_rows
        self.min_propagration_col = 1.0 / self.num_input_cols
        self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
        self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])

    def update(self, input_img, reverse_propagation=False):
        input_patches = self.get_patches_for(input_img)
        self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)

    def update_with_patches(self, input_patches, reverse_propagation=False):
        self._propagate(input_patches, reverse_propagation=reverse_propagation)
        self._random_update(input_patches)

    def get_patches_for(self, img):
        return make_patch_grid(img, self.patch_size);

    def normalize_patches(self, patches):
        norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
        return patches / norm

    def _propagate(self, input_patches, reverse_propagation=False):
        if reverse_propagation:
            roll_direction = 1
        else:
            roll_direction = -1
        sign = float(roll_direction)
        for step_i in range(self.num_propagation_steps):
            new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
            coords_row, similarity_row = self.eval_state(new_coords, input_patches)
            new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
            coords_col, similarity_col = self.eval_state(new_coords, input_patches)
            self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)

    def _random_update(self, input_patches):
        for alpha in range(1, self.num_random_steps + 1):  # NOTE this should actually stop when the move is < 1
            new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
            self.coords, self.similarity = self.eval_state(new_coords, input_patches)

    def eval_state(self, new_coords, input_patches):
        new_similarity = self.patch_similarity(input_patches, new_coords)
        delta_similarity = new_similarity - self.similarity
        coords = np.where(delta_similarity > 0, new_coords, self.coords)
        best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
        return coords, best_similarity

    def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
        delta_similarity = similarity_a - similarity_b
        best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
        best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
        return best_coords, best_similarity

    def patch_similarity(self, source, coords):
        '''Check the similarity of the patches specified in coords.'''
        target_vals = self.lookup_coords(self.target_patches_normed, coords)
        err = source * target_vals
        return np.sum(err, axis=(2, 3, 4))

    def clip_coords(self, coords):
        # TODO: should this all be in pixel space?
        coords = np.clip(coords, 0.0, 1.0)
        return coords

    def lookup_coords(self, x, coords):
        x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
        i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
        return x[i_coords[0], i_coords[1]]

    def get_reconstruction(self, patches=None, combined=None):
        if combined is not None:
            patches = make_patch_grid(combined, self.patch_size)
        if patches is None:
            patches = self.target_patches
        patches = self.lookup_coords(patches, self.coords)
        recon = combine_patches_grid(patches, self.input_shape)
        return recon

    def scale(self, new_shape, new_target_img):
        '''Create a new matcher of the given shape and replace its state
        with a scaled up version of the current matcher's state.
        '''
        new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
                patch_stride=self.patch_stride, jump_size=self.jump_size,
                num_propagation_steps=self.num_propagation_steps,
                num_random_steps=self.num_random_steps,
                random_max_radius=self.random_max_radius,
                random_scale=self.random_scale)
        new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
        new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
        return new_matcher


def congrid(a, newdims, method='linear', centre=False, minusone=False):
    '''Arbitrary resampling of source array to new dimension sizes.
    Currently only supports maintaining the same number of dimensions.
    To use 1-D arrays, first promote them to shape (x,1).

    Uses the same parameters and creates the same co-ordinate lookup points
    as IDL''s congrid routine, which apparently originally came from a
    VAX/VMS routine of the same name.

    method:
    neighbour - closest value from original data
    nearest and linear - uses n x 1-D interpolations using
                         scipy.interpolate.interp1d
    (see Numerical Recipes for validity of use of n 1-D interpolations)
    spline - uses ndimage.map_coordinates

    centre:
    True - interpolation points are at the centres of the bins
    False - points are at the front edge of the bin

    minusone:
    For example- inarray.shape = (i,j) & new dimensions = (x,y)
    False - inarray is resampled by factors of (i/x) * (j/y)
    True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
    This prevents extrapolation one element beyond bounds of input array.
    '''
    if not a.dtype in [np.float64, np.float32]:
        a = np.cast[float](a)

    m1 = np.cast[int](minusone)
    ofs = np.cast[int](centre) * 0.5
    old = np.array( a.shape )
    ndims = len( a.shape )
    if len( newdims ) != ndims:
        print("[congrid] dimensions error. " \
              "This routine currently only support " \
              "rebinning to the same number of dimensions.")
        return None
    newdims = np.asarray( newdims, dtype=float )
    dimlist = []

    if method == 'neighbour':
        for i in range( ndims ):
            base = np.indices(newdims)[i]
            dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
                            * (base + ofs) - ofs )
        cd = np.array( dimlist ).round().astype(int)
        newa = a[list( cd )]
        return newa

    elif method in ['nearest','linear']:
        # calculate new dims
        for i in range( ndims ):
            base = np.arange( newdims[i] )
            dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
                            * (base + ofs) - ofs )
        # specify old dims
        olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]

        # first interpolation - for ndims = any
        mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
        newa = mint( dimlist[-1] )

        trorder = [ndims - 1] + range( ndims - 1 )
        for i in range( ndims - 2, -1, -1 ):
            newa = newa.transpose( trorder )

            mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
            newa = mint( dimlist[i] )

        if ndims > 1:
            # need one more transpose to return to original dimensions
            newa = newa.transpose( trorder )

        return newa

    elif method in ['spline']:
        oslices = [ slice(0,j) for j in old ]
        oldcoords = np.ogrid[oslices]
        nslices = [ slice(0,j) for j in list(newdims) ]
        newcoords = np.mgrid[nslices]

        newcoords_dims = range(np.rank(newcoords))
        #make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
        newcoords_tr = newcoords.transpose(newcoords_dims)
        # makes a view that affects newcoords

        newcoords_tr += ofs

        deltas = (np.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas

        newcoords_tr -= ofs

        newa = scipy.ndimage.map_coordinates(a, newcoords)
        return newa

    else:
        print("Congrid error: Unrecognized interpolation type.\n", \
              "Currently only \'neighbour\', \'nearest\',\'linear\',", \
              "and \'spline\' are supported.")
        return None


if __name__ == '__main__':
    import sys
    import time

    from scipy.misc import imsave

    from image_analogy.img_utils import load_image, preprocess_image, deprocess_image

    content_image_path, style_image_path, output_prefix = sys.argv[1:]
    jump_size = 1.0
    num_steps = 7
    patch_size = 1
    patch_stride = 1

    feat_chans = 512
    feat_style_shape = (feat_chans, 12, 18)
    feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
    feat_in_shape = (feat_chans, 17, 10)
    feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
    matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
    feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
    for i in range(num_steps):
        matcher.update_with_patches(feat_in_normed)
    r = matcher.get_reconstruction()

    content_img_img = load_image(content_image_path)
    content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
    content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
    style_img = load_image(style_image_path)
    style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
    style_img = preprocess_image(
        load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
    pg = make_patch_grid(content_img, patch_size)
    result = combine_patches_grid(pg, content_img.shape[::-1])
    outimg = deprocess_image(result, contrast_percent=0)
    imsave(output_prefix + '_bestre.png', outimg)

    # # #
    matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
    for i in range(num_steps):
        start = time.time()
        matcher.update(content_img, reverse_propagation=bool(i % 2))
        print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
        end = time.time()
        #print end-start
    start = time.time()
    result = matcher.get_reconstruction(patches=matcher.target_patches)
    print(result.shape)
    end = time.time()
    print(end-start)
    outimg = deprocess_image(result, contrast_percent=0)
    # # imsave takes (rows, cols, channels)
    imsave(output_prefix + '_best.png', outimg)
43.922078
171
0.671348
5921
0.437685
0
0
0
0
0
0
2470
0.182584
716192be9eb9b6903ed659ac040571121cd26498
344
py
Python
muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py
desafinadude/muni-portal-backend
9ffc447194b8f29619585cd919f67d62062457a3
[ "MIT" ]
1
2021-01-18T13:01:04.000Z
2021-01-18T13:01:04.000Z
muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py
desafinadude/muni-portal-backend
9ffc447194b8f29619585cd919f67d62062457a3
[ "MIT" ]
42
2020-08-29T08:55:53.000Z
2021-04-14T16:41:29.000Z
muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py
desafinadude/muni-portal-backend
9ffc447194b8f29619585cd919f67d62062457a3
[ "MIT" ]
2
2020-10-28T16:34:41.000Z
2022-02-07T10:29:31.000Z
# Generated by Django 2.2.10 on 2021-02-24 09:42

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0029_auto_20210224_0936'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='servicerequest',
            name='mobile_reference',
        ),
    ]
19.111111
48
0.610465
258
0.75
0
0
0
0
0
0
113
0.328488
7161bb83a934c99f17f3988c15fe48d8592c6f29
1247
py
Python
rllib/agents/ppo/tests/test_appo.py
noahshpak/ray
edd783bc327760a4892ab89222ee551e42df15b9
[ "Apache-2.0" ]
2
2020-02-17T17:36:23.000Z
2020-08-24T19:59:18.000Z
rllib/agents/ppo/tests/test_appo.py
noahshpak/ray
edd783bc327760a4892ab89222ee551e42df15b9
[ "Apache-2.0" ]
8
2020-11-13T19:02:47.000Z
2022-03-12T00:44:51.000Z
rllib/agents/ppo/tests/test_appo.py
noahshpak/ray
edd783bc327760a4892ab89222ee551e42df15b9
[ "Apache-2.0" ]
1
2021-07-26T07:17:06.000Z
2021-07-26T07:17:06.000Z
import unittest

import ray
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_compute_single_action, \
    framework_iterator


class TestAPPO(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        ray.init()

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_appo_compilation(self):
        """Test whether an APPOTrainer can be built with both frameworks."""
        config = ppo.appo.DEFAULT_CONFIG.copy()
        config["num_workers"] = 1
        num_iterations = 2

        for _ in framework_iterator(config, frameworks=("torch", "tf")):
            _config = config.copy()
            trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
            for i in range(num_iterations):
                print(trainer.train())
            check_compute_single_action(trainer)

            _config = config.copy()
            _config["vtrace"] = True
            trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
            for i in range(num_iterations):
                print(trainer.train())
            check_compute_single_action(trainer)


if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
29
76
0.630313
982
0.78749
0
0
119
0.095429
0
0
140
0.112269