id (string, 2–8 chars) | text (string, 16–264k chars) | dataset_id (1 class)
---|---|---|
6524933
|
#coding=utf-8
import smtplib, mimetypes
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.utils import COMMASPACE
#mailto = ['<EMAIL>']
def sendMail(mailfrom='<EMAIL>', mailto=('<EMAIL>',), subject='x log-analysis platform backend', content='test content'):
    msg = MIMEMultipart()
    msg['From'] = mailfrom
    msg['To'] = COMMASPACE.join(mailto)
    msg['Subject'] = subject
    # Add the message body
    txt = MIMEText(content)
    msg.attach(txt)
    # # Add a binary attachment
    # fileName = r'e:/PyQt4.rar'
    # ctype, encoding = mimetypes.guess_type(fileName)
    # if ctype is None or encoding is not None:
    #     ctype = 'application/octet-stream'
    # maintype, subtype = ctype.split('/', 1)
    # with open(fileName, 'rb') as f:
    #     att1 = MIMEImage(f.read(), _subtype=subtype)
    # att1.add_header('Content-Disposition', 'attachment', filename=fileName)
    # msg.attach(att1)
    # Send the mail
    smtp = smtplib.SMTP()
    smtp.connect('xxx.baixdu.com')
    #smtp.login('from', 'password')
    ret = smtp.sendmail(mailfrom, mailto, msg.as_string())
    smtp.quit()
    print('Mail sent successfully:', mailto)
if __name__ == '__main__':
    mailto = ['<EMAIL>']
    sendMail(mailto=mailto)
|
StarcoderdataPython
|
3579015
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from fbprophet import Prophet
import utils.AlphaVantageUtils as av
import utils.PostgresUtils as pg
import utils.ModelUtils as mdl
df_prices = pg.get_prices_with_features(av._TIC_MICROSOFT, av._INT_DAILY)
name = pg.get_symbol_name(av._TIC_MICROSOFT)
df_prices.drop(columns=['open', 'high', 'low', 'volume'], inplace=True)
df_train, df_test = mdl.train_test_split(df_prices, 1000)
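# A minimal continuation sketch (not part of the original sample), assuming df_train is a pandas
# DataFrame and that pg.get_prices_with_features() returns columns named 'timestamp' and 'close';
# Prophet itself requires the two fitted columns to be named 'ds' and 'y'.
df_fit = df_train.rename(columns={'timestamp': 'ds', 'close': 'y'})[['ds', 'y']]
model = Prophet(daily_seasonality=True)
model.fit(df_fit)
future = model.make_future_dataframe(periods=len(df_test))
forecast = model.predict(future)  # includes 'ds', 'yhat', 'yhat_lower', 'yhat_upper' columns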
|
StarcoderdataPython
|
392725
|
<filename>main.py
"""
Created by Epic at 9/5/20
"""
from color_format import basicConfig
import speedcord
from speedcord.http import Route, HttpClient, LockManager
from os import environ as env
from logging import getLogger, DEBUG
from aiohttp import ClientSession
from aiohttp.client_ws import ClientWebSocketResponse, WSMessage, WSMsgType
from ujson import loads
from urllib.parse import quote as uriquote
from asyncio import Lock, sleep
from speedcord.exceptions import NotFound, Unauthorized, Forbidden, HTTPException
ws: ClientWebSocketResponse = None
client = speedcord.Client(intents=1)
basicConfig(getLogger())
logger = getLogger("worker")
logger.setLevel(DEBUG)
handlers = {}
total_guilds_served = 0
class CustomHttp(HttpClient):
async def request(self, route: Route, **kwargs):
bucket = route.bucket
for i in range(self.retry_attempts):
if not self.global_lock.is_set():
self.logger.debug("Sleeping for Global Rate Limit")
await self.global_lock.wait()
ratelimit_lock: Lock = self.ratelimit_locks.get(bucket, Lock(loop=self.loop))
await ratelimit_lock.acquire()
with LockManager(ratelimit_lock) as lockmanager:
                # Merge the default headers with the user's headers.
                # We could probably use an if-check to see whether headers is set;
                # not sure which is faster.
kwargs["headers"] = {
**self.default_headers, **kwargs.get("headers", {})
}
# Format the reason
try:
reason = kwargs.pop("reason")
except KeyError:
pass
else:
if reason:
kwargs["headers"]["X-Audit-Log-Reason"] = uriquote(
reason, safe="/ ")
r = await self.session.request(route.method,
self.baseuri + route.path,
**kwargs)
# check if we have rate limit header information
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
# we've depleted our current bucket
delta = float(r.headers.get("X-Ratelimit-Reset-After"))
self.logger.debug(
f"Ratelimit exceeded. Bucket: {bucket}. Retry after: "
f"{delta}")
lockmanager.defer()
self.loop.call_later(delta, ratelimit_lock.release)
status_code = r.status
if status_code == 404:
raise NotFound(r)
elif status_code == 401:
raise Unauthorized(r)
elif status_code == 403:
raise Forbidden(r, await r.text())
elif status_code == 429:
if not r.headers.get("Via"):
# Cloudflare banned?
raise HTTPException(r, await r.text())
data = await r.json()
retry_after = data["retry_after"] / 1000
is_global = data.get("global", False)
if is_global:
await ws.send_json({"t": "ratelimit", "d": "global"})
self.logger.warning(
f"Global ratelimit hit! Retrying in "
f"{retry_after}s")
else:
await ws.send_json({"t": "ratelimit", "d": bucket})
self.logger.warning(
f"A ratelimit was hit (429)! Bucket: {bucket}. "
f"Retrying in {retry_after}s")
await sleep(retry_after)
continue
return r
async def handle_worker():
global ws
session = ClientSession()
async with session.ws_connect(f"ws://{env['WORKER_MANAGER_HOST']}:6060/workers") as ws:
await ws.send_json({
"t": "identify",
"d": None
})
message: WSMessage
async for message in ws:
if message.type == WSMsgType.TEXT:
data = message.json(loads=loads)
handler = handlers.get(data["t"], None)
if handler is None:
continue
client.loop.create_task(handler(data["d"]))
async def handle_dispatch_bot_info(data: dict):
client.token = data["token"]
client.name = data["name"]
logger.info(f"Started worker with name {client.name}!")
client.http = CustomHttp(client.token)
await client.connect()
async def handle_request(data: dict):
request_data = data["data"]
method = request_data["method"]
path = request_data["path"]
params = request_data["route_params"]
kwargs = params["kwargs"]
route = Route(method, path, **params)
logger.debug(f"{method} {path}")
r = await client.http.request(route, **kwargs)
if r.status < 200 or r.status >= 300:
logger.warning(await r.text())
@client.listen("GUILD_CREATE")
async def on_guild_create(data, shard):
global total_guilds_served
await ws.send_json({"t": "add_guild", "d": data["id"]})
total_guilds_served += 1
logger.debug(f"New guild to serve: {data['name']}. Now serving {total_guilds_served} guilds.")
@client.listen("GUILD_DELETE")
async def on_guild_delete(data, shard):
global total_guilds_served
total_guilds_served -= 1
await ws.send_json({"t": "remove_guild", "d": data["id"]})
handlers["request"] = handle_request
handlers["dispatch_bot_info"] = handle_dispatch_bot_info
client.loop.run_until_complete(handle_worker())
client.loop.run_forever()
|
StarcoderdataPython
|
3231300
|
"""`AlphaIMS`, `AlphaAMS`"""
import numpy as np
from collections import OrderedDict
from .base import DiskElectrode, ElectrodeArray, ElectrodeGrid, ProsthesisSystem
class AlphaIMS(ProsthesisSystem):
"""Alpha IMS
This class creates an AlphaIMS array and places it on the retina
such that the center of the array is located at (x,y,z), given in
microns, and the array is rotated by rotation angle ``rot``, given in
radians.
The array is oriented upright in the visual field, such that an
array with center (0,0) has the top three rows lie in the lower
retina (upper visual field).
An electrode can be addressed by name, row/column index, or integer index
(into the flattened array).
.. note::
Column order is reversed in a left-eye implant.
Parameters
----------
x : float
x coordinate of the array center (um)
y : float
y coordinate of the array center (um)
z: float or array_like
Distance of the array to the retinal surface (um). Either a list
with 60 entries or a scalar.
rot : float
Rotation angle of the array (rad). Positive values denote
counter-clock-wise (CCW) rotations in the retinal coordinate
system.
eye : 'LE' or 'RE', optional, default: 'RE'
Eye in which array is implanted.
Examples
--------
Create an AlphaIMS array centered on the fovea, at 100um distance from
the retina:
>>> from pulse2percept.implants import AlphaIMS
>>> AlphaIMS(x=0, y=0, z=100, rot=0) # doctest: +NORMALIZE_WHITESPACE
AlphaIMS(earray=pulse2percept.implants.base.ElectrodeGrid, eye='RE',
shape=(37, 37), stim=None)
Get access to the third electrode in the top row (by name or by row/column
index):
>>> alpha_ims = AlphaIMS(x=0, y=0, z=100, rot=0)
>>> alpha_ims['A3']
DiskElectrode(r=50.0, x=-1152.0, y=-1296.0, z=100.0)
>>> alpha_ims[0, 2]
DiskElectrode(r=50.0, x=-1152.0, y=-1296.0, z=100.0)
"""
def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None):
self.eye = eye
self.shape = (37, 37)
elec_radius = 50
e_spacing = 72 # um
self.earray = ElectrodeGrid(self.shape, e_spacing, x=x, y=y, z=z,
rot=rot, etype=DiskElectrode,
r=elec_radius)
# Set stimulus if available:
self.stim = stim
# Set left/right eye:
if not isinstance(eye, str):
raise TypeError("'eye' must be a string, either 'LE' or 'RE'.")
if eye != 'LE' and eye != 'RE':
raise ValueError("'eye' must be either 'LE' or 'RE'.")
# Unfortunately, in the left eye the labeling of columns is reversed...
if eye == 'LE':
# FIXME: Would be better to have more flexibility in the naming
# convention. This is a quick-and-dirty fix:
names = list(self.earray.keys())
objects = list(self.earray.values())
names = np.array(names).reshape(self.earray.shape)
# Reverse column names:
for row in range(self.earray.shape[0]):
names[row] = names[row][::-1]
# Build a new ordered dict:
electrodes = OrderedDict([])
for name, obj in zip(names.ravel(), objects):
electrodes.update({name: obj})
# Assign the new ordered dict to earray:
self.earray.electrodes = electrodes
def get_params(self):
params = super().get_params()
params.update({'shape': self.shape})
return params
class AlphaAMS(ProsthesisSystem):
"""Alpha AMS
This class creates an AlphaAMS array and places it below the retina
such that the center of the array is located at (x,y,z), given in
microns, and the array is rotated by rotation angle ``rot``, given in
radians.
The array is oriented upright in the visual field, such that an
array with center (0,0) has the top three rows lie in the lower
retina (upper visual field), as shown below:
An electrode can be addressed by name, row/column index, or integer index
(into the flattened array).
.. note::
Column order is reversed in a left-eye implant.
Parameters
----------
x : float
x coordinate of the array center (um)
y : float
y coordinate of the array center (um)
    z : float or array_like
Distance of the array to the retinal surface (um). Either a list
with 60 entries or a scalar.
rot : float
Rotation angle of the array (rad). Positive values denote
counter-clock-wise (CCW) rotations in the retinal coordinate
system.
eye : {'LE', 'RE'}, optional, default: 'RE'
Eye in which array is implanted.
Examples
--------
Create an AlphaAMS array centered on the fovea, at 100um distance from
the retina:
>>> from pulse2percept.implants import AlphaAMS
>>> AlphaAMS(x=0, y=0, z=100, rot=0) # doctest: +NORMALIZE_WHITESPACE
AlphaAMS(earray=pulse2percept.implants.base.ElectrodeGrid, eye='RE',
shape=(40, 40), stim=None)
Get access to the third electrode in the top row (by name or by row/column
index):
>>> alpha_ims = AlphaAMS(x=0, y=0, z=100, rot=0)
>>> alpha_ims['A3']
DiskElectrode(r=15.0, x=-1225.0, y=-1365.0, z=100.0)
>>> alpha_ims[0, 2]
DiskElectrode(r=15.0, x=-1225.0, y=-1365.0, z=100.0)
"""
def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None):
self.eye = eye
self.shape = (40, 40)
elec_radius = 15
e_spacing = 70 # um
self.earray = ElectrodeGrid(self.shape, e_spacing, x=x, y=y, z=z,
rot=rot, etype=DiskElectrode,
r=elec_radius)
# Set stimulus if available:
self.stim = stim
# Set left/right eye:
if not isinstance(eye, str):
raise TypeError("'eye' must be a string, either 'LE' or 'RE'.")
if eye != 'LE' and eye != 'RE':
raise ValueError("'eye' must be either 'LE' or 'RE'.")
# Unfortunately, in the left eye the labeling of columns is reversed...
if eye == 'LE':
# FIXME: Would be better to have more flexibility in the naming
# convention. This is a quick-and-dirty fix:
names = list(self.earray.keys())
objects = list(self.earray.values())
names = np.array(names).reshape(self.earray.shape)
# Reverse column names:
for row in range(self.earray.shape[0]):
names[row] = names[row][::-1]
# Build a new ordered dict:
electrodes = OrderedDict([])
for name, obj in zip(names.ravel(), objects):
electrodes.update({name: obj})
# Assign the new ordered dict to earray:
self.earray.electrodes = electrodes
def get_params(self):
params = super().get_params()
params.update({'shape': self.shape})
return params
|
StarcoderdataPython
|
5015155
|
<reponame>Max-astro/A2Project
basePath = '/Raid1/Illustris/TNG/'
import numpy as np
from illustris_python.snapshot import loadSubhalo
from illustris_python.groupcat import loadSubhalos
def specific_angular_momentum(x, v, m):
"""
specific angular momentum of a group of particles
Parameters
----------
    x : array_like
        array of particle positions of shape (Nptcl, ndim)
    v : array_like
        array of particle velocities of shape (Nptcl, ndim)
    m : array_like
        array of particle masses of shape (Nptcl,)
Returns
-------
    L : numpy.array
specific angular momentum vector
"""
return (m[:,np.newaxis]*np.cross(x,v)).sum(axis=0)
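# Quick usage sketch (hypothetical arrays, not part of the original module):
#   x = np.random.rand(100, 3)   # particle positions, shape (Nptcl, ndim)
#   v = np.random.rand(100, 3)   # particle velocities, shape (Nptcl, ndim)
#   m = np.ones(100)             # particle masses, shape (Nptcl,)
#   L = specific_angular_momentum(x, v, m)   # 3-vector: sum_i m_i * cross(x_i, v_i)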
def galaxy_ang_mom(gal_id, basePath, snapNum, reduced=True):
"""
Parameters
----------
    gal_id : int
    basePath : string
    snapNum : int
    reduced : bool
    Returns
    -------
    L, mag_L, L_hat : specific angular momentum vector, its magnitude, and the unit vector L/|L|
"""
# load galaxy position (most bound particle)
gal_positions = loadSubhalos(basePath, snapNum, fields=['SubhaloPos'])/1000.0
gal_position = gal_positions[gal_id]
# half mass radius
gal_rhalfs = loadSubhalos(basePath, snapNum, fields=['SubhaloHalfmassRadType'])[:,4]/1000.0
gal_rhalf = gal_rhalfs[gal_id]
# load stellar particles
ptcl_coords = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['Coordinates'])/1000.0
ptcl_masses = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['Masses'])*10.0**10
ptcl_vels = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['Velocities'])
sf_time = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['GFM_StellarFormationTime'])
is_a_star = (sf_time>=0.0) # don't use wind particles
# account for PBCs
dx = ptcl_coords[:,0] - gal_position[0]
dy = ptcl_coords[:,1] - gal_position[1]
dz = ptcl_coords[:,2] - gal_position[2]
ptcl_coords = np.vstack((dx,dy,dz)).T
r = np.sqrt(np.sum(ptcl_coords**2, axis=1))/gal_rhalf
mask = (r<=10.0) & (is_a_star)
L = specific_angular_momentum(ptcl_coords[mask], ptcl_vels[mask], ptcl_masses[mask])
mag_L = np.sqrt(np.sum(L**2,axis=-1))
return L, mag_L, L/mag_L
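# Example call (the snapshot number 99 is illustrative; basePath is defined at the top of this script):
#   L, mag_L, L_hat = galaxy_ang_mom(gal_id=0, basePath=basePath, snapNum=99)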
|
StarcoderdataPython
|
11203633
|
from .app import get_application
app = get_application()
|
StarcoderdataPython
|
6401489
|
<filename>setup.py
from setuptools import find_packages, setup
setup(name='transfer-contacts',
version='0.1.0',
description="Helps extract notes from a certain contact database program.",
author="<NAME>",
url='https://github.com/juharris/transfer-contacts',
license="MIT",
packages=find_packages(),
install_requires=[
'pandas',
]
)
|
StarcoderdataPython
|
1664380
|
<filename>lib/__init__.py
import sys
from pathlib import Path
lib_dir = (Path(__file__).parent).resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
|
StarcoderdataPython
|
3285248
|
<reponame>KonstantinosAng/CodeWars
# see https://www.codewars.com/kata/5266876b8f4bf2da9b000362/train/python
from TestFunction import Test
def likes(names):
if len(names) < 1: return "no one likes this"
if len(names) == 1: return f"{names[0]} likes this"
if len(names) == 2: return f"{names[0]} and {names[1]} like this"
if len(names) == 3: return f"{names[0]}, {names[1]} and {names[2]} like this"
return f"{names[0]}, {names[1]} and {len(names) - 2} others like this"
test = Test(None)
test.it('Basic tests')
test.assert_equals(likes([]), 'no one likes this')
test.assert_equals(likes(['Peter']), 'Peter likes this')
test.assert_equals(likes(['Jacob', 'Alex']), 'Jacob and Alex like this')
test.assert_equals(likes(['Max', 'John', 'Mark']), 'Max, John and Mark like this')
test.assert_equals(likes(['Alex', 'Jacob', 'Mark', 'Max']), 'Alex, Jacob and 2 others like this')
|
StarcoderdataPython
|
12850443
|
<filename>src/worker/worker.py
"""Worker application.
It calls an external slow task and send its output, line by line, as "log"
events through SocketIO. The web page will then print the lines.
"""
# Disable the warning because eventlet must patch the standard library as soon
# as possible.
from communication import (CELERY,
get_socketio) # pylint: disable=wrong-import-order
import socket
from datetime import datetime
from subprocess import PIPE, Popen
SOCKETIO = get_socketio()
def announce():
"""Tell this worker is up and running."""
hostname = socket.gethostname()
time = datetime.now().strftime('%H:%M:%S')
msg = '{} Worker {} is up.'.format(time, hostname)
SOCKETIO.emit('log', {'data': msg})
announce()
@CELERY.task
def add_task(name):
"""Run the slow task as a subprocess and send results to the web site."""
args = './slow_task.sh', str(name)
with Popen(args, stdout=PIPE, universal_newlines=True) as proc:
for line in proc.stdout:
SOCKETIO.emit('log', {'data': line.rstrip()})
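# Usage sketch (an assumption, not part of the original file): a producer process would enqueue the
# task through the shared Celery app, e.g.
#   from worker.worker import add_task
#   add_task.delay('example-name')  # runs ./slow_task.sh on a worker and streams its output over SocketIO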
|
StarcoderdataPython
|
142493
|
<gh_stars>0
"""
Reference implementation of the MP3 correlation energy utilizing antisymmetrized
spin-orbitals from an RHF reference.
Requirements:
SciPy 0.13.0+, NumPy 1.7.2+
References:
Equations from [Szabo:1996]
"""
__authors__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__copyright__ = "(c) 2014-2017, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-05-23"
import time
import numpy as np
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Memory for Psi4 in GB
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Memory for numpy in GB
numpy_memory = 2
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({'basis': 'cc-pvdz',
'scf_type': 'pk',
                  'mp2_type': 'conv',
'freeze_core': 'false',
'e_convergence': 1e-8,
'd_convergence': 1e-8})
# First compute RHF energy using Psi4
scf_e, wfn = psi4.energy('SCF', return_wfn=True)
# Grab data from the wavefunction
C = wfn.Ca()
ndocc = wfn.doccpi()[0]
nmo = wfn.nmo()
SCF_E = wfn.energy()
eps = np.asarray(wfn.epsilon_a())
# Compute size of ERI tensor in GB
ERI_Size = (nmo ** 4) * 8e-9
print('Size of the ERI/MO tensor will be %4.2f GB.' % ERI_Size)
memory_footprint = ERI_Size * 2.5
if memory_footprint > numpy_memory:
    psi4.core.clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds numpy_memory \
limit of %4.2f GB." % (memory_footprint, numpy_memory))
#Make spin-orbital MO
t=time.time()
print('Starting ERI build and spin AO -> spin-orbital MO transformation...')
mints = psi4.core.MintsHelper(wfn.basisset())
MO = np.asarray(mints.mo_spin_eri(C, C))
eps = np.repeat(eps, 2)
nso = nmo * 2
print('..finished transformation in %.3f seconds.\n' % (time.time() - t))
# Update nocc and nvirt
nocc = ndocc * 2
nvirt = MO.shape[0] - nocc
# Build epsilon tensor
eocc = eps[:nocc]
evir = eps[nocc:]
epsilon = 1/(eocc.reshape(-1, 1, 1, 1) + eocc.reshape(-1, 1, 1) - evir.reshape(-1, 1) - evir)
# Create occupied and virtual slices
o = slice(0, nocc)
v = slice(nocc, MO.shape[0])
# MP2 Correlation: [Szabo:1996] pp. 352, Eqn 6.72
MP2corr_E = 0.25 * np.einsum('abrs,rsab,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)
MP2total_E = SCF_E + MP2corr_E
print('MP2 correlation energy: %16.10f' % MP2corr_E)
print('MP2 total energy: %16.10f' % MP2total_E)
# Compare to Psi4
psi4.compare_values(psi4.energy('MP2'), MP2total_E, 6, 'MP2 Energy')
# MP3 Correlation: [Szabo:1996] pp. 353, Eqn. 6.75
eqn1 = 0.125 * np.einsum('abrs,cdab,rscd,abrs,cdrs->', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)
eqn2 = 0.125 * np.einsum('abrs,rstu,tuab,abrs,abtu', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)
eqn3 = np.einsum('abrs,cstb,rtac,absr,acrt', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
MP3corr_E = eqn1 + eqn2 + eqn3
MP3total_E = MP2total_E + MP3corr_E
print('\nMP3 correlation energy: %16.10f' % MP3corr_E)
print('MP3 total energy: %16.10f' % MP3total_E)
# Compare to Psi4
psi4.compare_values(psi4.energy('MP3'), MP3total_E, 6, 'MP3 Energy')
|
StarcoderdataPython
|
5114819
|
class FbsIterateItem():
def __init__(self, formula, parent, step, inv_nf):
self.formula = formula
self.parent = parent
self.step = step
self.inv_nf = inv_nf
self.remained = 1
self.childs = []
|
StarcoderdataPython
|
8156173
|
<filename>sidewalkify/cli.py
"""Handle data fetching/cleaning tasks automatically. Reads and writes from a
pseudo-database in the filesystem, organized as ./cities/<city>/
"""
import click
# TODO: Add type hints for geopandas
import geopandas as gpd # type: ignore
from . import graph
from . import draw
@click.command()
@click.argument("infile")
@click.argument("outfile")
@click.option("--driver", default="GeoJSON")
@click.option("--precision", default=1)
def sidewalkify(
infile: str, outfile: str, driver: str, precision: int
) -> None:
gdf = gpd.read_file(infile)
crs = gdf.crs
G = graph.create_graph(gdf, precision=precision)
paths = graph.find_paths(G)
sidewalks = draw.draw_sidewalks(paths, crs=crs)
sidewalks.to_file(outfile, driver=driver)
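# Example invocation (assuming the package exposes this command as a console script named `sidewalkify`;
# otherwise it can be run with `python -m sidewalkify.cli`):
#   sidewalkify streets.geojson sidewalks.geojson --driver GeoJSON --precision 1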
|
StarcoderdataPython
|
3359183
|
<gh_stars>1-10
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ResourceGroupPreparer, JMESPathCheck
from azure.cli.testsdk import ScenarioTest
from .scenario_mixin import CdnScenarioMixin
class CdnEndpointScenarioTest(CdnScenarioMixin, ScenarioTest):
@ResourceGroupPreparer()
def test_endpoint_crud(self, resource_group):
from knack.util import CLIError
profile_name = 'profile123'
self.endpoint_list_cmd(resource_group, profile_name, expect_failure=True)
self.profile_create_cmd(resource_group, profile_name)
list_checks = [JMESPathCheck('length(@)', 0)]
self.endpoint_list_cmd(resource_group, profile_name, checks=list_checks)
endpoint_name = self.create_random_name(prefix='endpoint', length=24)
origin = 'www.example.com'
checks = [JMESPathCheck('name', endpoint_name),
JMESPathCheck('origins[0].hostName', origin),
JMESPathCheck('isHttpAllowed', True),
JMESPathCheck('isHttpsAllowed', True),
JMESPathCheck('isCompressionEnabled', False),
JMESPathCheck('queryStringCachingBehavior', 'IgnoreQueryString')]
self.endpoint_create_cmd(resource_group, endpoint_name, profile_name, origin, checks=checks)
list_checks = [JMESPathCheck('length(@)', 1)]
self.endpoint_list_cmd(resource_group, profile_name, checks=list_checks)
update_checks = [JMESPathCheck('name', endpoint_name),
JMESPathCheck('origins[0].hostName', origin),
JMESPathCheck('isHttpAllowed', False),
JMESPathCheck('isHttpsAllowed', True),
JMESPathCheck('isCompressionEnabled', True),
JMESPathCheck('queryStringCachingBehavior', 'IgnoreQueryString')]
options = '--no-http --enable-compression'
self.endpoint_update_cmd(resource_group,
endpoint_name,
profile_name,
options=options,
checks=update_checks)
update_checks = [JMESPathCheck('name', endpoint_name),
JMESPathCheck('origins[0].hostName', origin),
JMESPathCheck('isHttpAllowed', True),
JMESPathCheck('isHttpsAllowed', False),
JMESPathCheck('isCompressionEnabled', False),
JMESPathCheck('queryStringCachingBehavior', 'IgnoreQueryString')]
options = '--no-http false --no-https --enable-compression false'
self.endpoint_update_cmd(resource_group,
endpoint_name,
profile_name,
options=options,
checks=update_checks)
self.endpoint_delete_cmd(resource_group, endpoint_name, profile_name)
@ResourceGroupPreparer()
def test_endpoint_start_and_stop(self, resource_group):
profile_name = 'profile123'
self.profile_create_cmd(resource_group, profile_name)
endpoint_name = self.create_random_name(prefix='endpoint', length=24)
origin = 'www.example.com'
self.endpoint_create_cmd(resource_group, endpoint_name, profile_name, origin)
checks = [JMESPathCheck('resourceState', 'Stopped')]
self.endpoint_stop_cmd(resource_group, endpoint_name, profile_name)
self.endpoint_show_cmd(resource_group, endpoint_name, profile_name, checks=checks)
checks = [JMESPathCheck('resourceState', 'Running')]
self.endpoint_start_cmd(resource_group, endpoint_name, profile_name)
self.endpoint_show_cmd(resource_group, endpoint_name, profile_name, checks=checks)
@ResourceGroupPreparer()
def test_endpoint_load_and_purge(self, resource_group):
profile_name = 'profile123'
self.profile_create_cmd(resource_group, profile_name, options='--sku Standard_Verizon')
endpoint_name = self.create_random_name(prefix='endpoint', length=24)
origin = 'www.example.com'
self.endpoint_create_cmd(resource_group, endpoint_name, profile_name, origin)
content_paths = ['/index.html', '/javascript/app.js']
self.endpoint_load_cmd(resource_group, endpoint_name, profile_name, content_paths)
content_paths = ['/index.html', '/javascript/*']
self.endpoint_purge_cmd(resource_group, endpoint_name, profile_name, content_paths)
|
StarcoderdataPython
|
5161276
|
import numpy as np
from holoviews.core import (HoloMap, GridSpace, Layout, Empty, Dataset,
NdOverlay, DynamicMap, Dimension)
from holoviews.element import Curve, Image, Points, Histogram
from holoviews.streams import Stream
from .testplot import TestBokehPlot, bokeh_renderer
try:
from bokeh.layouts import Column, Row
from bokeh.models import Div, ToolbarBox, GlyphRenderer, Tabs, Panel, Spacer, GridBox
from bokeh.plotting import Figure
except:
pass
class TestLayoutPlot(TestBokehPlot):
def test_layout_update_visible(self):
hmap = HoloMap({i: Curve(np.arange(i), label='A') for i in range(1, 3)})
hmap2 = HoloMap({i: Curve(np.arange(i), label='B') for i in range(3, 5)})
plot = bokeh_renderer.get_plot(hmap+hmap2)
subplot1, subplot2 = [p for k, p in sorted(plot.subplots.items())]
subplot1 = subplot1.subplots['main']
subplot2 = subplot2.subplots['main']
self.assertTrue(subplot1.handles['glyph_renderer'].visible)
self.assertFalse(subplot2.handles['glyph_renderer'].visible)
plot.update((4,))
self.assertFalse(subplot1.handles['glyph_renderer'].visible)
self.assertTrue(subplot2.handles['glyph_renderer'].visible)
def test_layout_title(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
plot = bokeh_renderer.get_plot(hmap1+hmap2)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = ('<span style="color:black;font-family:Arial;font-style:bold;'
'font-weight:bold;font-size:12pt">Default: 0</span>')
self.assertEqual(title.text, text)
def test_layout_title_fontsize(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
layout = Layout([hmap1, hmap2]).opts(plot=dict(fontsize={'title': '12pt'}))
plot = bokeh_renderer.get_plot(layout)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = ('<span style="color:black;font-family:Arial;font-style:bold;'
'font-weight:bold;font-size:12pt">Default: 0</span>')
self.assertEqual(title.text, text)
def test_layout_title_show_title_false(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
layout = Layout([hmap1, hmap2]).opts(plot=dict(show_title=False))
plot = bokeh_renderer.get_plot(layout)
self.assertTrue('title' not in plot.handles)
def test_layout_title_update(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
plot = bokeh_renderer.get_plot(hmap1+hmap2)
plot.update(1)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = ('<span style="color:black;font-family:Arial;font-style:bold;'
'font-weight:bold;font-size:12pt">Default: 1</span>')
self.assertEqual(title.text, text)
def test_layout_gridspaces(self):
layout = (GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4)}) +
GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4)}) +
Curve(range(10))).cols(2)
layout_plot = bokeh_renderer.get_plot(layout)
plot = layout_plot.state
# Unpack until getting down to two rows
self.assertIsInstance(plot, Column)
self.assertEqual(len(plot.children), 2)
toolbar, grid = plot.children
self.assertIsInstance(toolbar, ToolbarBox)
self.assertIsInstance(grid, GridBox)
self.assertEqual(len(grid.children), 3)
(col1, _, _), (col2, _, _), (fig, _, _) = grid.children
self.assertIsInstance(col1, Column)
self.assertIsInstance(col2, Column)
grid1 = col1.children[0]
grid2 = col2.children[0]
# Check the row of GridSpaces
self.assertEqual(len(grid1.children), 3)
_, (col1, _, _), _ = grid1.children
self.assertIsInstance(col1, Column)
inner_grid1 = col1.children[0]
self.assertEqual(len(grid2.children), 3)
_, (col2, _, _), _ = grid2.children
self.assertIsInstance(col2, Column)
inner_grid2 = col2.children[0]
for grid in [inner_grid1, inner_grid2]:
self.assertEqual(len(grid.children), 4)
(gfig1, _, _), (gfig2, _, _), (gfig3, _, _), (gfig4, _, _) = grid.children
self.assertIsInstance(gfig1, Figure)
self.assertIsInstance(gfig2, Figure)
self.assertIsInstance(gfig3, Figure)
self.assertIsInstance(gfig4, Figure)
def test_layout_instantiate_subplots(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = bokeh_renderer.get_plot(layout)
positions = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_layout_instantiate_subplots_transposed(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = bokeh_renderer.get_plot(layout(plot=dict(transpose=True)))
positions = [(0, 0), (0, 1), (1, 0), (2, 0), (3, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_empty_adjoint_plot(self):
adjoint = Curve([0,1,1,2,3]) << Empty() << Curve([0,1,1,0,1])
plot = bokeh_renderer.get_plot(adjoint)
adjoint_plot = plot.subplots[(0, 0)]
self.assertEqual(len(adjoint_plot.subplots), 3)
grid = plot.state.children[1]
(f1, _, _), (f2, _, _), (s1, _, _) = grid.children
self.assertIsInstance(s1, Spacer)
self.assertEqual(s1.width, 0)
self.assertEqual(s1.height, 0)
self.assertEqual(f1.plot_height, f2.plot_height)
def test_layout_plot_with_adjoints(self):
layout = (Curve([]) + Curve([]).hist()).cols(1)
plot = bokeh_renderer.get_plot(layout)
toolbar, grid = plot.state.children
self.assertIsInstance(toolbar, ToolbarBox)
self.assertIsInstance(grid, GridBox)
for (fig, _, _) in grid.children:
self.assertIsInstance(fig, Figure)
self.assertTrue([len([r for r in f.renderers if isinstance(r, GlyphRenderer)])
for (f, _, _) in grid.children], [1, 1, 1])
def test_layout_plot_tabs_with_adjoints(self):
layout = (Curve([]) + Curve([]).hist()).options(tabs=True)
plot = bokeh_renderer.get_plot(layout)
self.assertIsInstance(plot.state, Tabs)
panel1, panel2 = plot.state.tabs
self.assertIsInstance(panel1, Panel)
self.assertIsInstance(panel2, Panel)
self.assertEqual(panel1.title, 'Curve I')
self.assertEqual(panel2.title, 'AdjointLayout I')
def test_layout_shared_source_synced_update(self):
hmap = HoloMap({i: Dataset({chr(65+j): np.random.rand(i+2)
for j in range(4)}, kdims=['A', 'B', 'C', 'D'])
for i in range(3)})
# Create two holomaps of points sharing the same data source
hmap1= hmap.map(lambda x: Points(x.clone(kdims=['A', 'B'])), Dataset)
hmap2 = hmap.map(lambda x: Points(x.clone(kdims=['D', 'C'])), Dataset)
# Pop key (1,) for one of the HoloMaps and make Layout
hmap2.pop((1,))
layout = (hmap1 + hmap2).opts(plot=dict(shared_datasource=True))
# Get plot
plot = bokeh_renderer.get_plot(layout)
# Check plot created shared data source and recorded expected columns
sources = plot.handles.get('shared_sources', [])
source_cols = plot.handles.get('source_cols', {})
self.assertEqual(len(sources), 1)
source = sources[0]
data = source.data
cols = source_cols[id(source)]
self.assertEqual(set(cols), {'A', 'B', 'C', 'D'})
# Ensure the source contains the expected columns
self.assertEqual(set(data.keys()), {'A', 'B', 'C', 'D'})
# Update to key (1,) and check the source contains data
# corresponding to hmap1 and filled in NaNs for hmap2,
# which was popped above
plot.update((1,))
self.assertEqual(data['A'], hmap1[1].dimension_values(0))
self.assertEqual(data['B'], hmap1[1].dimension_values(1))
self.assertEqual(data['C'], np.full_like(hmap1[1].dimension_values(0), np.NaN))
self.assertEqual(data['D'], np.full_like(hmap1[1].dimension_values(0), np.NaN))
def test_shared_axes(self):
curve = Curve(range(10))
img = Image(np.random.rand(10,10))
plot = bokeh_renderer.get_plot(curve+img)
plot = plot.subplots[(0, 1)].subplots['main']
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual((x_range.start, x_range.end), (-.5, 9))
self.assertEqual((y_range.start, y_range.end), (-.5, 9))
def test_shared_axes_disable(self):
curve = Curve(range(10))
img = Image(np.random.rand(10,10)).opts(plot=dict(shared_axes=False))
plot = bokeh_renderer.get_plot(curve+img)
plot = plot.subplots[(0, 1)].subplots['main']
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual((x_range.start, x_range.end), (-.5, .5))
self.assertEqual((y_range.start, y_range.end), (-.5, .5))
def test_layout_empty_subplots(self):
layout = Curve(range(10)) + NdOverlay() + HoloMap() + HoloMap({1: Image(np.random.rand(10,10))})
plot = bokeh_renderer.get_plot(layout)
self.assertEqual(len(plot.subplots.values()), 2)
def test_layout_set_toolbar_location(self):
layout = (Curve([]) + Points([])).options(toolbar='left')
plot = bokeh_renderer.get_plot(layout)
self.assertIsInstance(plot.state, Row)
self.assertIsInstance(plot.state.children[0], ToolbarBox)
def test_layout_disable_toolbar(self):
layout = (Curve([]) + Points([])).options(toolbar=None)
plot = bokeh_renderer.get_plot(layout)
self.assertIsInstance(plot.state, GridBox)
self.assertEqual(len(plot.state.children), 2)
def test_layout_shared_inverted_yaxis(self):
layout = (Curve([]) + Curve([])).options('Curve', invert_yaxis=True)
plot = bokeh_renderer.get_plot(layout)
subplot = list(plot.subplots.values())[0].subplots['main']
self.assertEqual(subplot.handles['y_range'].start, 1)
self.assertEqual(subplot.handles['y_range'].end, 0)
def test_layout_dimensioned_stream_title_update(self):
stream = Stream.define('Test', test=0)()
dmap = DynamicMap(lambda test: Curve([]), kdims=['test'], streams=[stream])
layout = dmap + Curve([])
plot = bokeh_renderer.get_plot(layout)
self.assertIn('test: 0', plot.handles['title'].text)
stream.event(test=1)
self.assertIn('test: 1', plot.handles['title'].text)
plot.cleanup()
self.assertEqual(stream._subscribers, [])
def test_layout_axis_link_matching_name_label(self):
layout = Curve([1, 2, 3], vdims=('a', 'A')) + Curve([1, 2, 3], vdims=('a', 'A'))
plot = bokeh_renderer.get_plot(layout)
p1, p2 = (sp.subplots['main'] for sp in plot.subplots.values())
self.assertIs(p1.handles['y_range'], p2.handles['y_range'])
def test_layout_axis_not_linked_mismatching_name(self):
layout = Curve([1, 2, 3], vdims=('b', 'A')) + Curve([1, 2, 3], vdims=('a', 'A'))
plot = bokeh_renderer.get_plot(layout)
p1, p2 = (sp.subplots['main'] for sp in plot.subplots.values())
self.assertIsNot(p1.handles['y_range'], p2.handles['y_range'])
def test_layout_axis_linked_unit_and_no_unit(self):
layout = (Curve([1, 2, 3], vdims=Dimension('length', unit='m')) +
Curve([1, 2, 3], vdims='length'))
plot = bokeh_renderer.get_plot(layout)
p1, p2 = (sp.subplots['main'] for sp in plot.subplots.values())
self.assertIs(p1.handles['y_range'], p2.handles['y_range'])
def test_layout_axis_not_linked_mismatching_unit(self):
layout = (Curve([1, 2, 3], vdims=Dimension('length', unit='m')) +
Curve([1, 2, 3], vdims=Dimension('length', unit='cm')))
plot = bokeh_renderer.get_plot(layout)
p1, p2 = (sp.subplots['main'] for sp in plot.subplots.values())
self.assertIsNot(p1.handles['y_range'], p2.handles['y_range'])
def test_dimensioned_streams_with_dynamic_callback_returns_layout(self):
stream = Stream.define('aname', aname='a')()
def cb(aname):
x = np.linspace(0, 1, 10)
y = np.random.randn(10)
curve = Curve((x, y), group=aname)
hist = Histogram(y)
return (curve + hist).opts(shared_axes=False)
m = DynamicMap(cb, kdims=['aname'], streams=[stream])
p = bokeh_renderer.get_plot(m)
T = 'XYZT'
stream.event(aname=T)
self.assertIn('aname: ' + T, p.handles['title'].text, p.handles['title'].text)
p.cleanup()
self.assertEqual(stream._subscribers, [])
|
StarcoderdataPython
|
4862717
|
<reponame>Lenus254/password-locker
from credentials import Credentials
import unittest
user_credentials = []
class TestCredentials(unittest.TestCase):
def tearDown(self):
'''
        this test clears the credentials list after every test
'''
Credentials.user_credentials = []
def setUp(self):
'''
this test creates a new instance of the credential class
before each test
'''
self.new_credential = Credentials('twitter', '<PASSWORD>')
def test_init(self):
'''
        this test checks whether the data entered into the properties appears when accessed
'''
self.assertEqual(self.new_credential.site_name, 'twitter')
self.assertEqual(self.new_credential.password, '<PASSWORD>')
def test_save_credential(self):
'''
this is a test to check whether the credentials are appended to the credential list
'''
self.new_credential.save_credentials()
self.assertEqual(len(Credentials.user_credentials), 1)
def test_save_multiple(self):
'''
        Test function to check whether several credentials can be appended to the credentials list
'''
self.new_credential.save_credentials()
test_credential = Credentials('facebook', '54321')
test_credential.save_credentials()
self.assertEqual(len(Credentials.user_credentials), 2)
def test_delete_credentials(self):
'''
        checks whether the delete function removes credentials
'''
self.new_credential.save_credentials()
test_credential = Credentials('facebook', '54321')
test_credential.save_credentials()
self.new_credential.delete_credentials()
self.assertEqual(len(Credentials.user_credentials), 1)
def test_display_credentials(self):
self.assertEqual(Credentials.display_credentials(), Credentials.user_credentials)
def test_search_credential(self):
'''
this test checks whether saved credentials can be searched
'''
self.new_credential.save_credentials()
test_credential = Credentials('facebook', '54321') # credential
test_credential.save_credentials()
found_credentials = Credentials.search_by_site('facebook')
self.assertEqual(found_credentials.site_name, test_credential.site_name)
def test_credentials_exists(self):
'''
        returns boolean True if the searched credential is found
'''
self.new_credential.save_credentials()
test_credential = Credentials('facebook', '54321') # new credential
test_credential.save_credentials()
site_exist = Credentials.credential_exists('facebook')
self.assertTrue(site_exist)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4927197
|
<gh_stars>0
# LookWell v0.0
# srl
from lookwell import ItemList, Mill
def get_list0():
list = ItemList({
"items": [
{
"tags": ["upc/0001", "food/organic/apple"],
"desc": "2# bag of fuji apple",
"unit": {
"unit": "pound",
"qty": 2,
"container": "bag"
},
}
]
})
return list
def test_itemlist():
list = get_list0()
assert list is not None
item = list.findByTag("upc/0001")
assert item is not None
assert item.unitstr() == "2 pound bag"
def test_mill():
mill = Mill(get_list0())
assert mill.qtyByTag('upc/0001') is None
mill.process({
"date": "2021-03-01"
})
assert mill.qtyByTag('upc/0001') is None
mill.process({
"buy": {
"tag": "upc/0001",
"qty": 2,
}
})
assert mill.qtyByTag("upc/0001").getInUnit("pound") == 4
mill.process({
"date": "2021-03-02",
"census": [
{"tag": "upc/0001",
"qty": {"unit": "each", "qty": 9}}]})
assert mill.qtyByTag("upc/001").getInUnit("each") == 9
mill.process({
"date": "2021-03-03",
"eat": [
{"tag": "upc/0001",
"qty": {"unit": "each", "qty": 2}}]})
assert mill.qtyByTag("upc/001").getInUnit("each") == 7
|
StarcoderdataPython
|
3558175
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0115_auto_20160323_0756'),
]
operations = [
migrations.RenameField(
model_name='card',
old_name='skill_up_cards',
new_name='_skill_up_cards',
),
]
|
StarcoderdataPython
|
8196801
|
import copy
import time
import numpy
import theano
import theano.tensor as T
import lasagne
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state, check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
from sklearn.metrics import f1_score, roc_auc_score
from classifiers.utils import split_train_data, iterate
class CNNClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, layers: dict=None, dropout: float=0.5, learning_rate: float=2e-3, max_epochs_number: int=1000,
epochs_before_stopping: int=10, validation_fraction: float=None, beta1: float=0.9, beta2: float=0.999,
epsilon: float=1e-08, batch_size: int=500, batch_norm: bool=True, verbose: bool=False,
warm_start: bool=False, random_state=None, eval_metric: str='Logloss'):
""" Создать свёрточную нейросеть типа LeNet, состоящую из чередующихся слоёв свёртки и макс-пулинга.
Структура свёрточной сети задаётся словарём layers. В этом словаре должно быть три ключа "conv", "pool" и
"dense", описывающих, соответственно, параметры свёрточных слоёв, слоёв пулинга и полносвязных слоёв
(предполагается, что любая свёрточная сеть после серии чередующихся слоёв свёртки и подвыборки завершается хотя
бы одним скрытым полносвязным слоем).
Структура слоёв свёртки описывается N-элементым кортежем или списком, каждый элемент которого, в свою очередь,
также представляет собой кортеж или список, но только из двух элементов: количества карт признаков в свёрточном
слое и размера рецептивного поля. Размер рецептивного поля - это, как нетрудно догадаться, тоже 2-элементный
кортеж или список, в котором заданы ширина и высота рецептивного поля для всех нейронов всех карт признаков
свёрточного слоя. Если один из размеров рецептивного поля установлен в 0, то используется соответствующий
размер (ширина или высота) карты признаков предыдущего слоя (или входной карты, если текущий слой - первый).
Пример возможной структуры слоёв свёртки: (32, (5, 0), (64, (2, 0))). В данном примере указано, что свёрточная
сеть должна иметь два слоя свёртки. Первый из слоёв включает в себя 32 карты признаков, а все нейроны этого
слоя имеют рецептивное поле высотой 5 элементов, а шириной - во всю ширину входной карты (тем самым двумерная
свёртка элегантным движением превращается в одномерную - только по высоте, но не по ширине). Второй из
свёрточных слоёв включает в себя уже 64 карты признаков, а все нейроны этого слоя имеют рецептивное поле высотой
5 элементов, а шириной во всю ширину карты признаков предшествующего слоя пулинга.
Структура слоёв подвыборки также описывается N-элементым кортежем или списком, причём количество слоёв пулинга
должно строго совпадать с количеством слоёв свёртки, поскольку в свёрточной сети за каждым слоем свёртки
обязательно следует слой пулинга. Но каждый элемент, описывающий отдельный слой пулинга, отличается от элемента
для описания слоя свёртки. Дело в том, что число карт признаков слоя пулинга всегда равно числу карт признаков
предшествующего ему слоя свёртки, и, таким образом, дополнительно указывать это число при описании слоя пулинга
не имеет смысла. В таком случае один элемент, описывающий отдельный слой пулинга - это 2-элеметный кортеж или
список, задающий размер рецептивного поля всех нейронов слоя пулинга - ширину и высоту. И ширина, и высота
должны быть положительными вещественными числами. Они определяют, во сколько раз слой пулинга будет сжимать
карту признаков предшествующего ему слоя свёртки по высоте и ширине соответственно. Пример возможной структуры
слоёв пулинга: ((3, 1), (2, 1)). В данном примере указано, что свёрточная сеть должна иметь два слоя пулинга.
Первый из них сжимает карту признаков предшествующего ему слоя свёртки в три раза по высоте, а ширину оставляет
неизменной. Второй же слой пулинга сжимает карту признаков предшествующего ему слоя свёртки в два раза по
высоте, а ширину также оставляет неизменной.
Структура полносвязных скрытых слоёв - это просто кортеж или список положительных целых чисел, например,
(300, 100), что означает 300 нейронов в первом полносвязном скрытом слое и 100 нейронов во втором полносвязном
скрытом слое. Число нейронов в выходном слое всегда определяется автоматически на основе числа распознаваемых
классов.
Все функции активации имеют тип ReLU, кроме функций активации нейронов выходного слоя. Для выходного же слоя
используется либо логистическая сигмоида, если число распознаваемых классов равно двум, либо же SOFTMAX-функция
активации, если число распознаваемых классов больше двух.
Дропаут применяется только к полносвязной части свёрточной нейронной сети (к слоям свёртки и подвыборки
применять дропаут бессмысленно и даже вредно).
В качестве алгоритма обучения используется Adamax, параметры которого доступны для настройки.
Начать обучение свёрточной нейронной сети можно как "с нуля", инициализировав все веса случайным образом, так и
со старых значений весов, доставшихся "в наследство" от прошлых экспериментов (этим управляется параметром
warm_start, который должен быть установлен в True, если мы хотим использовать старые значения весов в начале
процедуры обучения).
В процессе обучения для предотвращения переобучения (overfitting) может быть использован критерий раннего
останова (early stopping). Этот критерий автоматически включается, если параметр validation_fraction не является
None либо же если в метод fit() в качестве дополнительного аргумента "validation" передано контрольное
(валидационное) множество примеров (см. комментарии к методу fit()). При включении критерия раннего останова
процесс обучения продолжается до тех пор, пока ошибка обобщения не прекратит убывать на протяжении последних
epochs_before_stopping эпох обучения (таким образом, параметр epochs_before_stopping определяет некое "терпение"
нейросети в режиме раннего останова). Если же режим раннего останова не используется, то обучение нейросети
будет продолжаться до тех пор, пока ошибка обучения (а не обобщения!) не прекратит убывать на протяжении
того же количества epochs_before_stopping эпох подряд. Но в обоих случаях число эпох обучения не может превысить
величину max_epochs_number (при достижении этого числа эпох обучение прекращается безотносительно выполнения
других критериев).
:param layers: Структура слоёв свёрточной сети (словарь с тремя ключами "conv", "pool" и "dense").
:param dropout: Коэффициент дропаута - вещественное число больше 0, но меньше 1.
:param learning_rate: Коэффициент скорости обучения для алгоритма Adamax - положительное вещественное число.
:param max_epochs_number: Максимальное число эпох обучения (положительное целое число).
:param epochs_before_stopping: Максимальное "терпение" сети в методе раннего останова (early stopping).
:param validation_fraction: Доля примеров обучающего множества для расчёта ошибки обобщения при early stopping.
:param beta1: Параметр алгоритма Adamax.
:param beta2: Параметр алгоритма Adamax.
:param epsilon: Параметр алгоритма Adamax.
:param batch_size: Размер одного "минибатча" при обучении и эксплуатации сети - положительное целое число.
:param batch_norm: Флажок, указывающий, надо ли использовать батч-нормализацию при обучении.
:param verbose: Флажок, указывающий, надо ли логгировать процесс обучения (печатать на экран с помощью print).
:param warm_start: Флажок, указывающий, надо ли начинать обучение со старых значений весов.
:param random_state: Генератор случайных чисел (нужен, прежде всего, для отладки).
:param eval_metric: Метрика, используемая для оценки способности к обобщению ("Logloss", "F1", "ROC-AUC").
"""
super().__init__()
self.layers = layers
self.dropout = dropout
self.learning_rate = learning_rate
self.max_epochs_number = max_epochs_number
self.validation_fraction = validation_fraction
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.verbose = verbose
self.epochs_before_stopping = epochs_before_stopping
self.batch_size = batch_size
self.batch_norm = batch_norm
self.warm_start = warm_start
self.random_state = random_state
self.eval_metric = eval_metric
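    # Example configuration (illustrative values only, mirroring the layer-structure format described in
    # the docstring above; X_train and y_train are hypothetical 4-D and 1-D numpy arrays):
    #   clf = CNNClassifier(layers={'conv': ((32, (5, 0)), (64, (2, 0))),
    #                               'pool': ((3, 1), (2, 1)),
    #                               'dense': (300, 100)},
    #                       dropout=0.5, eval_metric='F1', validation_fraction=0.1)
    #   clf.fit(X_train, y_train)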
def fit(self, X, y, **fit_params):
""" Обучить свёрточную нейросеть на заданном множестве примеров: входных примеров X и соответствующих меток y.
В процессе обучения, чтобы применить критерий раннего останова, можно задать контрольное (валидационное)
множество, на котором будет вычисляться ошибка обобщения. Это можно сделать с использованием необязательного
аргумента validation. Если этот аргумент указан, то он должен представлять собой двухэлементый кортеж или
список, первым элементом которого является множество входных примеров X_val, а вторым элементов - множество
меток классов y_val. X_val, как и множество обучающих примеров X, должно быть 4-мерным numpy.ndarray-массивом,
а y_val, как и y, должно быть 1-мерным numpy.ndarray-массивом меток классов.
Первое измерение массивов X и X_val - это число примеров. Соответственно, первое измерение X должно быть равно
первому (и единственному) измерению y, а первое измерение X_val - первому (и единственному) измерению y_val.
Если необязательный аргумент validation не указан, то контрольное (валидационное) множество автоматически
случайным образом отшипывается от обучающего множества в пропорции, заданной параметром validation_fraction.
Если же и необязательный аргумент validation не указан, и параметр validation_fraction установлен в None, то
критерий раннего останова не используется.
:param X: 4-мерный numpy.ndarray-массив (1-е измерение - обучающие примеры, а остальные - размеры примера).
:param y: 1-мерный numpy.ndarray-массив меток классов для каждого примера (метка - это целое неотрицательное).
:param validation: Необязательный параметр, задающий контрольное (валидационное) множество для early stopping.
:return self.
"""
self.check_params(**self.get_params(deep=False))
X, y = self.check_train_data(X, y)
input_structure = X.shape[1:]
self.random_state = check_random_state(self.random_state)
classes_list = sorted(list(set(y.tolist())))
if self.warm_start:
check_is_fitted(self, ['cnn_', 'predict_fn_', 'n_iter_', 'input_size_', 'loss_value_', 'classes_list_'])
if X.shape[1:] != self.input_size_:
raise ValueError('Samples of `X` do not correspond to the input structure! '
'Got {0}, expected {1}'.format(X.shape[1:], self.input_size_))
if self.classes_list_ != classes_list:
raise ValueError('List of classes is wrong. Got {0}, expected {1}.'.format(
classes_list, self.classes_list_
))
old_params = lasagne.layers.get_all_param_values(self.cnn_)
else:
old_params = None
if (len(classes_list) > 2) and (self.eval_metric == 'ROC-AUC'):
raise ValueError('You can not use `ROC-AUC` metric for early stopping '
'if number of classes is greater than 2.')
if (not hasattr(self, 'cnn_train_fn_')) or (not hasattr(self, 'cnn_val_fn_')) or \
(not hasattr(self, 'predict_fn_')) or (not hasattr(self, 'cnn_')):
cnn_input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
self.cnn_, _ = self.build_cnn(input_structure, len(classes_list), cnn_input_var)
train_loss, _ = self.build_loss(len(classes_list), target_var, self.cnn_, False)
params = lasagne.layers.get_all_params(self.cnn_, trainable=True)
updates = lasagne.updates.adamax(train_loss, params, learning_rate=self.learning_rate, beta1=self.beta1,
beta2=self.beta2, epsilon=self.epsilon)
self.cnn_train_fn_ = theano.function([cnn_input_var, target_var], train_loss, updates=updates,
allow_input_downcast=True)
test_loss, test_prediction = self.build_loss(len(classes_list), target_var, self.cnn_, True)
self.cnn_val_fn_ = theano.function([cnn_input_var, target_var], test_loss, allow_input_downcast=True)
self.predict_fn_ = theano.function([cnn_input_var], test_prediction, allow_input_downcast=True)
if old_params is not None:
lasagne.layers.set_all_param_values(self.cnn_, old_params)
if 'validation' in fit_params:
if (not isinstance(fit_params['validation'], tuple)) and (not isinstance(fit_params['validation'], list)):
raise ValueError('Validation data are specified incorrectly!')
if len(fit_params['validation']) != 2:
raise ValueError('Validation data are specified incorrectly!')
X_val, y_val = self.check_train_data(fit_params['validation'][0], fit_params['validation'][1])
if X.shape[1:] != X_val.shape[1:]:
raise ValueError('Validation inputs do not correspond to train inputs!')
if set(y.tolist()) != set(y_val.tolist()):
raise ValueError('Validation targets do not correspond to train targets!')
train_indices = numpy.arange(0, X.shape[0], 1, numpy.int32)
val_indices = numpy.arange(0, X_val.shape[0], 1, numpy.int32)
elif self.validation_fraction is not None:
n = int(round(self.validation_fraction * X.shape[0]))
if (n <= 0) or (n >= X.shape[0]):
raise ValueError('Train data cannot be split into train and validation subsets!')
X_val = None
y_val = None
train_indices, val_indices = split_train_data(y, n, self.random_state)
else:
X_val = None
y_val = None
train_indices = numpy.arange(0, X.shape[0], 1, numpy.int32)
val_indices = None
if self.verbose:
print("")
print("Training is started...")
best_eval_metric = None
cur_eval_metric = None
best_params = None
best_epoch_ind = None
early_stopping = False
for epoch_ind in range(self.max_epochs_number):
train_err = 0
start_time = time.time()
for batch in self.__iterate_minibatches(X, y, train_indices, shuffle=True):
inputs, targets = batch
train_err += self.cnn_train_fn_(inputs, targets)
train_err /= train_indices.shape[0]
val_err = 0.0
if val_indices is None:
if best_eval_metric is None:
best_epoch_ind = epoch_ind
best_eval_metric = train_err
best_params = lasagne.layers.get_all_param_values(self.cnn_)
elif train_err < best_eval_metric:
best_epoch_ind = epoch_ind
best_eval_metric = train_err
best_params = lasagne.layers.get_all_param_values(self.cnn_)
else:
val_err = 0
if X_val is None:
for batch in self.__iterate_minibatches(X, y, val_indices, shuffle=False):
inputs, targets = batch
val_err += self.cnn_val_fn_(inputs, targets)
else:
for batch in self.__iterate_minibatches(X_val, y_val, val_indices, shuffle=False):
inputs, targets = batch
val_err += self.cnn_val_fn_(inputs, targets)
val_err /= val_indices.shape[0]
if self.eval_metric == 'Logloss':
cur_eval_metric = val_err
if best_eval_metric is None:
best_epoch_ind = epoch_ind
best_eval_metric = cur_eval_metric
best_params = lasagne.layers.get_all_param_values(self.cnn_)
elif cur_eval_metric < best_eval_metric:
best_epoch_ind = epoch_ind
best_eval_metric = cur_eval_metric
best_params = lasagne.layers.get_all_param_values(self.cnn_)
else:
if self.eval_metric == 'F1':
if X_val is None:
cur_eval_metric = f1_score(
y[val_indices],
self.__predict(X[val_indices], len(classes_list)),
average=('binary' if len(classes_list) < 3 else 'macro')
)
else:
cur_eval_metric = f1_score(
y_val,
self.__predict(X_val, len(classes_list)),
average=('binary' if len(classes_list) < 3 else 'macro')
)
else:
if X_val is None:
cur_eval_metric = roc_auc_score(
y[val_indices],
self.__predict_proba(X[val_indices], len(classes_list))[:, 1]
)
else:
cur_eval_metric = roc_auc_score(
y_val,
self.__predict_proba(X_val, len(classes_list))[:, 1]
)
if best_eval_metric is None:
best_epoch_ind = epoch_ind
best_eval_metric = cur_eval_metric
best_params = lasagne.layers.get_all_param_values(self.cnn_)
elif cur_eval_metric > best_eval_metric:
best_epoch_ind = epoch_ind
best_eval_metric = cur_eval_metric
best_params = lasagne.layers.get_all_param_values(self.cnn_)
if self.verbose:
print("Epoch {} of {} took {:.3f}s".format(
epoch_ind + 1, self.max_epochs_number, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err))
if val_indices is not None:
print(" validation loss:\t\t{:.6f}".format(val_err))
if self.eval_metric != 'Logloss':
print(" validation {}:\t\t{:.6f}".format(self.eval_metric, cur_eval_metric))
if best_epoch_ind is not None:
if (epoch_ind - best_epoch_ind) >= self.epochs_before_stopping:
early_stopping = True
break
if best_params is None:
raise ValueError('The multilayer perceptron cannot be trained!')
self.loss_value_ = best_eval_metric
if self.warm_start:
self.n_iter_ += (best_epoch_ind + 1)
else:
self.n_iter_ = best_epoch_ind + 1
lasagne.layers.set_all_param_values(self.cnn_, best_params)
del best_params
self.input_size_ = input_structure
if self.verbose:
if early_stopping:
print('Training is stopped according to the early stopping criterion.')
else:
print('Training is stopped according to the exceeding of maximal epochs number.')
self.classes_list_ = classes_list
return self
def __predict(self, X, n_classes):
""" Распознать обученной нейросетью заданное множество входных примеров X.
Перед распознаванием ничего не проверять - ни корректность параметров нейросети, ни правильность входных данных.
:param X: 4-мерный numpy.ndarray-массив (1-е измерение - тестовые примеры, а остальные - размеры примера).
:param n_classes: количество распознаваемых классов.
:return 1-мерный numpy.ndarray-массив распознанных меток классов(целых чисел), равный по длине 1-му измерению X.
"""
n_samples = X.shape[0]
y_pred = numpy.zeros((n_samples,), dtype=numpy.int32)
if n_classes > 2:
sample_ind = 0
for batch in self.__iterate_minibatches_for_prediction(X):
inputs = batch
outputs = self.predict_fn_(inputs)
n_outputs = outputs.shape[0]
if sample_ind + n_outputs <= n_samples:
y_pred[sample_ind:(sample_ind + n_outputs)] = outputs.argmax(axis=1).astype(y_pred.dtype)
else:
y_pred[sample_ind:n_samples] = outputs[:(n_samples - sample_ind)].argmax(axis=1).astype(
y_pred.dtype)
sample_ind += n_outputs
else:
sample_ind = 0
for batch in self.__iterate_minibatches_for_prediction(X):
inputs = batch
outputs = self.predict_fn_(inputs)
n_outputs = outputs.shape[0]
if sample_ind + n_outputs <= n_samples:
y_pred[sample_ind:(sample_ind + n_outputs)] = (outputs >= 0.5).astype(y_pred.dtype)
else:
y_pred[sample_ind:n_samples] = (outputs[:(n_samples - sample_ind)] >= 0.5).astype(y_pred.dtype)
sample_ind += n_outputs
return y_pred
def predict(self, X):
""" Распознать обученной нейросетью заданное множество входных примеров X.
Перед распознаванием проверить корректность установки всех параметров нейросети и правильность задания множества
входных примеров.
:param X: 4-мерный numpy.ndarray-массив (1-е измерение - тестовые примеры, а остальные - размеры примера).
:return 1-мерный numpy.ndarray-массив распознанных меток классов(целых чисел), равный по длине 1-му измерению X.
"""
check_is_fitted(self, ['cnn_', 'predict_fn_', 'n_iter_', 'input_size_', 'loss_value_', 'classes_list_'])
X = self.check_input_data(X)
return self.__predict(X, len(self.classes_list_))
def __predict_proba(self, X, n_classes):
""" Вычислить вероятности распознавания классов для заданного множества входных примеров X.
Перед вычислением вероятностей ничего не проверять - ни корректность параметров нейросети, ни правильность
входных данных.
:param X: 4-мерный numpy.ndarray-массив (1-е измерение - тестовые примеры, а остальные - размеры примера).
:param n_classes: количество распознаваемых классов.
:return 2-мерный numpy.ndarray-массив, число строк которого равно 1-му измерению X, а столбцов - числу классов.
"""
n_samples = X.shape[0]
sample_ind = 0
if n_classes > 2:
probabilities = numpy.empty((n_samples, n_classes), dtype=numpy.float32)
for batch in self.__iterate_minibatches_for_prediction(X):
inputs = batch
outputs = self.predict_fn_(inputs)
n_outputs = outputs.shape[0]
if sample_ind + n_outputs <= n_samples:
probabilities[sample_ind:(sample_ind + n_outputs)] = outputs
else:
probabilities[sample_ind:n_samples] = outputs[:(n_samples - sample_ind)]
sample_ind += n_outputs
res = probabilities
else:
probabilities = numpy.empty((n_samples,), dtype=numpy.float32)
for batch in self.__iterate_minibatches_for_prediction(X):
inputs = batch
outputs = self.predict_fn_(inputs)
n_outputs = outputs.shape[0]
if sample_ind + n_outputs <= n_samples:
probabilities[sample_ind:(sample_ind + n_outputs)] = outputs
else:
probabilities[sample_ind:n_samples] = outputs[:(n_samples - sample_ind)]
sample_ind += n_outputs
probabilities = probabilities.reshape((probabilities.shape[0], 1))
res = numpy.hstack((1.0 - probabilities, probabilities))
return res
def predict_proba(self, X):
""" Вычислить вероятности распознавания классов для заданного множества входных примеров X.
Перед вычислением вероятностей проверить корректность установки всех параметров нейросети и правильность задания
множества входных примеров.
:param X: 4-мерный numpy.ndarray-массив (1-е измерение - тестовые примеры, а остальные - размеры примера).
:return 2-мерный numpy.ndarray-массив, число строк которого равно 1-му измерению X, а столбцов - числу классов.
"""
check_is_fitted(self, ['cnn_', 'predict_fn_', 'n_iter_', 'input_size_', 'loss_value_', 'classes_list_'])
X = self.check_input_data(X)
return self.__predict_proba(X, len(self.classes_list_))
def predict_log_proba(self, X):
""" Вычислить логарифмы вероятностей распознавания классов для заданного множества входных примеров X.
:param X: 4-мерный numpy.ndarray-массив (1-е измерение - тестовые примеры, а остальные - размеры примера).
:return 2-мерный numpy.ndarray-массив, число строк которого равно 1-му измерению X, а столбцов - числу классов.
"""
return numpy.log(self.predict_proba(X))
def get_params(self, deep=True):
""" Получить словарь управляющих параметров нейросети.
Данный метод используется внутри sklearn.pipeline.Pipeline, sklearn.model_selection.GridSearchCV и пр.
Соответствено, если мы хотим насладиться всей мощью scikit-learn и использовать наш класс там, то данный метод
нужно корректно реализовать.
:return словарь управляющих параметров нейросети (без параметров, настроенных по итогам обучения).
"""
return {'layers': copy.deepcopy(self.layers) if deep else self.layers, 'dropout': self.dropout,
'learning_rate': self.learning_rate, 'max_epochs_number': self.max_epochs_number,
'validation_fraction': self.validation_fraction, 'epochs_before_stopping': self.epochs_before_stopping,
'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon, 'batch_size': self.batch_size,
'verbose': self.verbose, 'batch_norm': self.batch_norm, 'warm_start': self.warm_start,
'eval_metric': self.eval_metric}
def set_params(self, **parameters):
""" Установить новые значения управляющих параметров нейросети из словаря.
Данный метод используется внутри sklearn.pipeline.Pipeline, sklearn.model_selection.GridSearchCV и пр.
Соответствено, если мы хотим насладиться всей мощью scikit-learn и использовать наш класс там, то данный метод
нужно корректно реализовать.
:param parameters: Названия и значения устанавливаемых параметров, заданные словарём.
:return self
"""
for parameter, value in parameters.items():
self.__setattr__(parameter, value)
return self
@staticmethod
def check_params(**kwargs):
""" Проверить корректность значений всех возможных параметров и, если что, бросить ValueError. """
if not 'layers' in kwargs:
raise ValueError('Structure of hidden layers is not specified!')
if not isinstance(kwargs['layers'], dict):
raise ValueError('Structure of hidden layers must be dictionary consisting from three items!')
if len(kwargs['layers']) != 3:
raise ValueError('Structure of hidden layers must be dictionary consisting from three items!')
if 'conv' not in kwargs['layers']:
raise ValueError('Description of convolution layers (`conv` key) cannot be found in the `layers` dict!')
conv_layers = kwargs['layers']['conv']
if 'pool' not in kwargs['layers']:
raise ValueError('Description of pooling layers (`pool` key) cannot be found in the `layers` dict!')
pooling_layers = kwargs['layers']['pool']
if 'dense' not in kwargs['layers']:
raise ValueError('Description of dense layers (`dense` key) cannot be found in the `layers` dict!')
dense_layers = kwargs['layers']['dense']
if (not isinstance(conv_layers, tuple)) and (not isinstance(conv_layers, list)):
raise ValueError('Structure of convolution layers must be list or tuple!')
if len(conv_layers) < 1:
raise ValueError('List of convolution layers is empty!')
if (not isinstance(pooling_layers, tuple)) and (not isinstance(pooling_layers, list)):
raise ValueError('Structure of pooling layers must be list or tuple!')
if len(pooling_layers) < 1:
raise ValueError('List of pooling layers is empty!')
if (not isinstance(dense_layers, tuple)) and (not isinstance(dense_layers, list)):
raise ValueError('Structure of dense layers must be list or tuple!')
if len(dense_layers) < 1:
raise ValueError('List of dense layers is empty!')
if len(conv_layers) != len(pooling_layers):
raise ValueError('Number of convolution layers must be equal to number of pooling layers!')
for ind in range(len(conv_layers)):
err_msg = 'Structure of convolution layer {0} is wrong!'.format(ind + 1)
if (not isinstance(conv_layers[ind], tuple)) and (not isinstance(conv_layers[ind], list)):
raise ValueError(err_msg)
if len(conv_layers[ind]) != 2:
raise ValueError(err_msg)
if not isinstance(conv_layers[ind][0], int):
raise ValueError(err_msg)
if conv_layers[ind][0] < 1:
raise ValueError(err_msg)
if (not isinstance(conv_layers[ind][1], tuple)) and (not isinstance(conv_layers[ind][1], list)):
raise ValueError(err_msg)
if len(conv_layers[ind][1]) != 2:
raise ValueError(err_msg)
if not isinstance(conv_layers[ind][1][0], int):
raise ValueError(err_msg)
if conv_layers[ind][1][0] < 0:
raise ValueError(err_msg)
if not isinstance(conv_layers[ind][1][1], int):
raise ValueError(err_msg)
if conv_layers[ind][1][1] < 0:
raise ValueError(err_msg)
if (conv_layers[ind][1][0] <= 0) and (conv_layers[ind][1][1] <= 0):
raise ValueError(err_msg)
err_msg = 'Structure of pooling layer {0} is wrong!'.format(ind + 1)
if (not isinstance(pooling_layers[ind], tuple)) and (not isinstance(pooling_layers[ind], list)):
raise ValueError(err_msg)
if len(pooling_layers[ind]) != 2:
raise ValueError(err_msg)
if pooling_layers[ind][0] < 0:
raise ValueError(err_msg)
if pooling_layers[ind][1] < 0:
raise ValueError(err_msg)
if (pooling_layers[ind][0] <= 0) and (pooling_layers[ind][1] <= 0):
raise ValueError(err_msg)
receptive_field_for_pool_layer = (
pooling_layers[ind][0] if pooling_layers[ind][0] > 0 else 1,
pooling_layers[ind][1] if pooling_layers[ind][1] > 0 else 1
)
if (receptive_field_for_pool_layer[0] < 1) or (receptive_field_for_pool_layer[1] < 1):
raise ValueError(err_msg)
n_dense_layers = len(dense_layers)
for layer_ind in range(n_dense_layers):
if (not isinstance(dense_layers[layer_ind], int)) or (dense_layers[layer_ind] < 1):
raise ValueError('Size of fully-connected layer {0} is inadmissible!'.format(layer_ind))
if 'dropout' not in kwargs:
raise ValueError('Dropout probability is not specified!')
if (kwargs['dropout'] <= 0.0) or (kwargs['dropout'] >= 1.0):
raise ValueError('Dropout probability is wrong!')
if 'learning_rate' not in kwargs:
raise ValueError('Learning rate is not specified!')
if kwargs['learning_rate'] <= 0.0:
raise ValueError('Learning rate must be positive value!')
if 'max_epochs_number' not in kwargs:
raise ValueError('Maximal number of train epochs is not specified!')
if (not isinstance(kwargs['max_epochs_number'], int)) or (kwargs['max_epochs_number'] <= 0):
raise ValueError('Maximal number of train epochs must be positive integer value!')
if 'validation_fraction' not in kwargs:
raise ValueError('Validation fraction is not specified!')
if kwargs['validation_fraction'] is not None:
if (kwargs['validation_fraction'] <= 0.0) or (kwargs['validation_fraction'] >= 1.0):
raise ValueError('Validation fraction must be in (0.0, 1.0) or None!')
if not 'beta1' in kwargs:
raise ValueError('Beta1 for the Adamax algorithm is not specified!')
if (kwargs['beta1'] < 0.0) or (kwargs['beta1'] >= 1.0):
raise ValueError('Beta1 for the Adamax algorithm must be in [0.0, 1.0)!')
if not 'beta2' in kwargs:
raise ValueError('Beta2 for the Adamax algorithm is not specified!')
if (kwargs['beta2'] < 0.0) or (kwargs['beta2'] >= 1.0):
raise ValueError('Beta2 for the Adamax algorithm must be in [0.0, 1.0)!')
if 'epsilon' not in kwargs:
raise ValueError('Epsilon for the Adamax algorithm is not specified!')
if kwargs['epsilon'] <= 0.0:
raise ValueError('Epsilon for the Adamax algorithm must be positive value!')
if not 'batch_size' in kwargs:
raise ValueError('Batch size is not specified!')
if (not isinstance(kwargs['batch_size'], int)) or (kwargs['batch_size'] <= 0):
raise ValueError('Batch size must be positive integer value!')
if not 'epochs_before_stopping' in kwargs:
raise ValueError('Maximal number of consecutive epochs when validation score is not improving '
'is not specified!')
if (not isinstance(kwargs['epochs_before_stopping'], int)) or (kwargs['epochs_before_stopping'] <= 0):
raise ValueError('Maximal number of consecutive epochs when validation score is not improving must be '
'positive integer value!')
if kwargs['epochs_before_stopping'] > kwargs['max_epochs_number']:
raise ValueError('Maximal number of consecutive epochs when validation score is not improving must be '
'positive integer value!')
if not 'batch_norm' in kwargs:
raise ValueError('Flag of the batch normalization is not specified!')
if not isinstance(kwargs['batch_norm'], bool):
raise ValueError('Flag of the batch normalization must be boolean value!')
if not 'warm_start' in kwargs:
raise ValueError('Flag of the warm start is not specified!')
if not isinstance(kwargs['warm_start'], bool):
raise ValueError('Flag of the warm start must be boolean value!')
if not 'verbose' in kwargs:
raise ValueError('Flag of the verbose mode is not specified!')
if (not isinstance(kwargs['verbose'], bool)) and (not isinstance(kwargs['verbose'], int)):
raise ValueError('Flag of the verbose mode must be boolean or integer value!')
if not 'eval_metric' in kwargs:
raise ValueError('Metric for evaluation and early stopping is not specified!')
if not isinstance(kwargs['eval_metric'], str):
raise ValueError('Metric for evaluation and early stopping must be a string value!')
if kwargs['eval_metric'] not in {'Logloss', 'F1', 'ROC-AUC'}:
raise ValueError('"{0}" is unknown metric for evaluation and early stopping! '
'We expect "Logloss", "F1" or "ROC-AUC".'.format(kwargs['eval_metric']))
def check_train_data(self, X, y):
""" Проверить корректность обучающего (или тестового) множества входных примеров X и меток классов y.
Если что-то пошло не так, то бросить ValueError.
"""
X, y = check_X_y(X, y, accept_sparse=False, dtype=[numpy.float32, numpy.float64, numpy.uint8], allow_nd=True)
if len(X.shape) != 4:
raise ValueError('`X` must be a 4-D array (samples, input maps, rows of input map, columns of input map)!')
for sample_ind in range(y.shape[0]):
if y[sample_ind] < 0:
raise ValueError('Target values must be non-negative integer numbers!')
if set(y.tolist()) != set(range(int(y.max()) + 1)):
raise ValueError('Target values must be non-negative integer numbers!')
return X, y
def check_input_data(self, X):
""" Проверить корректность множества входных примеров X и бросить ValueError в случае ошибки. """
X = check_array(X, accept_sparse=False, dtype=[numpy.float32, numpy.float64, numpy.uint8], allow_nd=True)
if len(X.shape) != 4:
raise ValueError('`X` must be 4D array (samples, input maps, rows of input map, columns of input map)!')
if X.shape[1:] != self.input_size_:
raise ValueError('Samples of `X` do not correspond to the input structure! '
'Got {0}, expected {1}'.format(X.shape[1:], self.input_size_))
return X
def build_cnn(self, input_structure, number_of_classes, input_var, trainable_shared_params=None):
""" Построить вычислительный граф нейросети средствами Theano/Lasagne.
:param input_structure - 3-элементный кортеж, задающий количество входных карт, их высоту и ширину.
:param number_of_classes - число распознаваемых классов.
:param input_var - символьная входная переменная для вычислительного графа Theano.
:param trainable_shared_params - расшариваемые параметры нейросети-близнеца (или None, если близнецов нет).
:return кортеж из двух вычислительных графов Theano: для всей сети в целом и для сети без выходного слоя.
"""
l_in = lasagne.layers.InputLayer(
shape=(None, input_structure[0], input_structure[1], input_structure[2]),
input_var=input_var
)
input_size = (input_structure[1], input_structure[2])
conv_layers = self.layers['conv']
pooling_layers = self.layers['pool']
dense_layers = self.layers['dense']
if conv_layers[0][0] <= 0:
raise ValueError('Convolution layer 1: {0} is wrong number of feature maps!'.format(conv_layers[0][0]))
receptive_field_for_conv_layer = (
conv_layers[0][1][0] if conv_layers[0][1][0] > 0 else input_size[0],
conv_layers[0][1][1] if conv_layers[0][1][1] > 0 else input_size[1]
)
if (receptive_field_for_conv_layer[0] <= 0) or (receptive_field_for_conv_layer[1] <= 0):
raise ValueError('Convolution layer 1: ({0}, {1}) is wrong size of receptive field!'.format(
receptive_field_for_conv_layer[0], receptive_field_for_conv_layer[1]
))
feature_map_for_conv_layer = (
input_size[0] + 1 - receptive_field_for_conv_layer[0],
input_size[1] + 1 - receptive_field_for_conv_layer[1]
)
if (feature_map_for_conv_layer[0] <= 0) or (feature_map_for_conv_layer[1] <= 0):
raise ValueError('Convolution layer 1: ({0}, {1}) is wrong size of feature map!'.format(
feature_map_for_conv_layer[0], feature_map_for_conv_layer[1]
))
if self.batch_norm:
l_conv = lasagne.layers.batch_norm(
lasagne.layers.Conv2DLayer(
l_in, num_filters=conv_layers[0][0],
filter_size=receptive_field_for_conv_layer,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[0],
name='l_conv_1'
)
)
else:
l_conv = lasagne.layers.Conv2DLayer(
l_in, num_filters=conv_layers[0][0],
filter_size=receptive_field_for_conv_layer,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[0],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None else trainable_shared_params[1],
name='l_conv_1'
)
receptive_field_for_pool_layer = (
pooling_layers[0][0] if pooling_layers[0][0] > 0 else 1,
pooling_layers[0][1] if pooling_layers[0][1] > 0 else 1
)
if (receptive_field_for_pool_layer[0] <= 0) or (receptive_field_for_pool_layer[1] <= 0):
raise ValueError('Pooling layer 1: ({0}, {1}) is wrong size of receptive field!'.format(
receptive_field_for_pool_layer[0], receptive_field_for_pool_layer[1]
))
feature_map_for_pool_layer = (
feature_map_for_conv_layer[0] // receptive_field_for_pool_layer[0],
feature_map_for_conv_layer[1] // receptive_field_for_pool_layer[1]
)
if (feature_map_for_pool_layer[0] <= 0) or (feature_map_for_pool_layer[1] <= 0):
raise ValueError('Pooling layer 1: ({0}, {1}) is wrong size of feature map!'.format(
feature_map_for_pool_layer[0], feature_map_for_pool_layer[1]
))
l_pool = lasagne.layers.Pool2DLayer(
l_conv,
pool_size=receptive_field_for_pool_layer,
name='l_pool_1'
)
input_size = feature_map_for_pool_layer
for ind in range(len(conv_layers) - 1):
if conv_layers[ind + 1][0] <= 0:
raise ValueError('Convolution layer {0}: {1} is wrong number of feature maps!'.format(
ind + 2, conv_layers[ind + 1][0]
))
receptive_field_for_conv_layer = (
conv_layers[ind + 1][1][0] if conv_layers[ind + 1][1][0] > 0 else input_size[0],
conv_layers[ind + 1][1][1] if conv_layers[ind + 1][1][1] > 0 else input_size[1]
)
if (receptive_field_for_conv_layer[0] <= 0) or (receptive_field_for_conv_layer[1] <= 0):
raise ValueError('Convolution layer {0}: ({1}, {2}) is wrong size of receptive field!'.format(
ind + 2, receptive_field_for_conv_layer[0], receptive_field_for_conv_layer[1]
))
feature_map_for_conv_layer = (
input_size[0] + 1 - receptive_field_for_conv_layer[0],
input_size[1] + 1 - receptive_field_for_conv_layer[1]
)
if (feature_map_for_conv_layer[0] <= 0) or (feature_map_for_conv_layer[1] <= 0):
raise ValueError('Convolution layer {0}: ({1}, {2}) is wrong size of feature map!'.format(
ind + 2, feature_map_for_conv_layer[0], feature_map_for_conv_layer[1]
))
if self.batch_norm:
l_conv = lasagne.layers.batch_norm(
lasagne.layers.Conv2DLayer(
l_pool, num_filters=conv_layers[ind + 1][0],
filter_size=receptive_field_for_conv_layer,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[(ind + 1) * 3],
name='l_conv_{0}'.format(ind + 2)
)
)
else:
l_conv = lasagne.layers.Conv2DLayer(
l_pool, num_filters=conv_layers[ind + 1][0],
filter_size=receptive_field_for_conv_layer,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[(ind + 1) * 2],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None
else trainable_shared_params[(ind + 1) * 2 + 1],
name='l_conv_{0}'.format(ind + 2)
)
receptive_field_for_pool_layer = (
pooling_layers[ind + 1][0] if pooling_layers[ind + 1][0] > 0 else 1,
pooling_layers[ind + 1][1] if pooling_layers[ind + 1][1] > 0 else 1
)
if (receptive_field_for_pool_layer[0] <= 0) or (receptive_field_for_pool_layer[1] <= 0):
raise ValueError('Pooling layer {0}: ({1}, {2}) is wrong size of receptive field!'.format(
ind + 2, receptive_field_for_pool_layer[0], receptive_field_for_pool_layer[1]
))
feature_map_for_pool_layer = (
feature_map_for_conv_layer[0] // receptive_field_for_pool_layer[0],
feature_map_for_conv_layer[1] // receptive_field_for_pool_layer[1]
)
if (feature_map_for_pool_layer[0] <= 0) or (feature_map_for_pool_layer[1] <= 0):
raise ValueError('Pooling layer {0}: ({1}, {2}) is wrong size of feature map!'.format(
ind + 2, feature_map_for_pool_layer[0], feature_map_for_pool_layer[1]
))
l_pool = lasagne.layers.Pool2DLayer(
l_conv,
pool_size=receptive_field_for_pool_layer,
name='l_pool_{0}'.format(ind + 2)
)
input_size = feature_map_for_pool_layer
layer_ind = len(conv_layers) * 3 if self.batch_norm else len(conv_layers) * 2
l_in_drop = lasagne.layers.DropoutLayer(l_pool, p=self.dropout)
if self.batch_norm:
l_hid_old = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(
l_in_drop, num_units=dense_layers[0],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
name='l_dense_1'
)
)
layer_ind += 3
else:
l_hid_old = lasagne.layers.DenseLayer(
l_in_drop, num_units=dense_layers[0],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None
else trainable_shared_params[layer_ind + 1],
name='l_dense_1'
)
layer_ind += 2
last_real_layer = l_hid_old
l_hid_old_drop = lasagne.layers.DropoutLayer(l_hid_old, p=self.dropout)
for ind in range(len(dense_layers) - 1):
if self.batch_norm:
l_hid_new = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(
l_hid_old_drop, num_units=dense_layers[ind + 1],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
name='l_dense_{0}'.format(ind + 2)
)
)
layer_ind += 3
else:
l_hid_new = lasagne.layers.DenseLayer(
l_hid_old_drop, num_units=dense_layers[ind + 1],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None
else trainable_shared_params[layer_ind + 1],
name='l_dense_{0}'.format(ind + 2)
)
layer_ind += 2
last_real_layer = l_hid_new
l_hid_new_drop = lasagne.layers.DropoutLayer(l_hid_new, p=self.dropout)
l_hid_old_drop = l_hid_new_drop
last_layer = l_hid_old_drop
if number_of_classes > 2:
cnn_output = lasagne.layers.DenseLayer(
last_layer, num_units=number_of_classes,
nonlinearity=lasagne.nonlinearities.softmax,
W=lasagne.init.GlorotUniform() if trainable_shared_params is None else trainable_shared_params[-2],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None else trainable_shared_params[-1],
name='l_cnn'
)
else:
cnn_output = lasagne.layers.DenseLayer(
last_layer, num_units=1,
nonlinearity=lasagne.nonlinearities.sigmoid,
W=lasagne.init.GlorotUniform() if trainable_shared_params is None else trainable_shared_params[-2],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None else trainable_shared_params[-1],
name='l_cnn'
)
return cnn_output, last_real_layer
def build_loss(self, number_of_classes, target_var, cnn, deterministic):
""" Построить вычислительный граф для функции потерь и классификационной функции средствами Theano/Lasagne.
:param number_of_classes: Число распознаваемых классов.
:param target_var: Символьная переменная Theano, задающая желаемые метки классов для обучения/тестирования.
:param cnn: вычислительный граф Theano для всей нейросети.
:param deterministic: булевый флаг, определяющий режим работы (True - тестирование, False - обучение).
:return 2-элементный кортеж: построенные графы для функции потерь и для классификационной функции.
"""
if number_of_classes > 2:
prediction = lasagne.layers.get_output(cnn)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
output_prediction = lasagne.layers.get_output(cnn, deterministic=deterministic)
else:
prediction = lasagne.layers.get_output(cnn)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var)
output_prediction = T.flatten(lasagne.layers.get_output(cnn, deterministic=deterministic))
loss = loss.sum()
return loss, output_prediction
def dump_all(self):
""" Выполнить сериализацию нейросети в словарь (dict).
Метод выгружает значения всех параметров нейросети в словарь, ключами которого являются названия параметров,
а значениями - соответственно, значения. В сериализации участвуют абсолютно все параметры, кроме random_state,
т.е. и управляющие параметры, задаваемые, например, в конструкторе, и настраиваемые параметры, значения которых
устанавливаются по итогам обучения (веса нейросети, распознаваемые классы и прочее).
При сериализации выполняется копирование (а не передача по ссылка) всех составных структур данных.
:return: словарь для всех параметров нейросети.
"""
try:
check_is_fitted(self, ['cnn_', 'predict_fn_', 'n_iter_', 'input_size_', 'loss_value_', 'classes_list_'])
is_trained = True
except:
is_trained = False
params = self.get_params(True)
if is_trained:
params['weights_and_biases'] = lasagne.layers.get_all_param_values(self.cnn_)
params['loss_value_'] = self.loss_value_
params['n_iter_'] = self.n_iter_
params['input_size_'] = self.input_size_
params['classes_list_'] = copy.copy(self.classes_list_)
return params
def load_all(self, new_params):
""" Выполнить десериализацию нейросети из словаря (dict).
Метод проверяет корректность всех параметров нейросети, заданных во входном словаре, и в случае успешной
проверки переносит эти значения в нейросеть (в случае неудачи бросает исключение ValueError). В десериализации
участвуют абсолютно все параметры, кроме random_state, т.е. и управляющие параметры, задаваемые, например, в
конструкторе, и настраиваемые параметры, значения которых устанавливаются по итогам обучения (веса нейросети,
распознаваемые классы и прочее).
При десериализации выполняется копирование (а не передача по ссылка) всех составных структур данных.
:param new_params: словарь (dict) со всеми параметрами нейросети для десериализации.
:return: self
"""
if not isinstance(new_params, dict):
raise ValueError('`new_params` is wrong! Expected {0}.'.format(type({0: 1})))
self.check_params(**new_params)
expected_param_keys = {'layers', 'dropout', 'learning_rate', 'max_epochs_number', 'validation_fraction',
'epochs_before_stopping', 'beta1', 'beta2', 'epsilon', 'batch_size', 'verbose',
'batch_norm', 'warm_start', 'eval_metric'}
params_after_training = {'weights_and_biases', 'loss_value_', 'n_iter_', 'input_size_', 'classes_list_'}
is_fitted = len(set(new_params.keys())) > len(expected_param_keys)
if is_fitted:
if set(new_params.keys()) != (expected_param_keys | params_after_training):
raise ValueError('`new_params` does not contain the expected keys!')
self.layers = copy.deepcopy(new_params['layers'])
self.dropout = new_params['dropout']
self.learning_rate = new_params['learning_rate']
self.max_epochs_number = new_params['max_epochs_number']
self.validation_fraction = new_params['validation_fraction']
self.beta1 = new_params['beta1']
self.beta2 = new_params['beta2']
self.epsilon = new_params['epsilon']
self.verbose = new_params['verbose']
self.epochs_before_stopping = new_params['epochs_before_stopping']
self.batch_size = new_params['batch_size']
self.batch_norm = new_params['batch_norm']
self.warm_start = new_params['warm_start']
self.eval_metric = new_params['eval_metric']
if getattr(self, 'random_state', None) is None:
self.random_state = None
if is_fitted:
if not isinstance(new_params['loss_value_'], float):
raise ValueError('`new_params` is wrong! Generalization loss `loss_value_` must be '
'floating-point number!')
if not isinstance(new_params['n_iter_'], int):
raise ValueError('`new_params` is wrong! Number of training epochs `n_iter_` must be positive integer!')
if new_params['n_iter_'] <= 0:
raise ValueError('`new_params` is wrong! Number of training epochs `n_iter_` must be positive integer!')
if (not isinstance(new_params['input_size_'], tuple)) and (not isinstance(new_params['input_size_'], list)):
raise ValueError('`new_params` is wrong! All input data sizes `input_size_` must be list or tuple!')
if len(new_params['input_size_']) != 3:
raise ValueError('`new_params` is wrong! All input data sizes `input_size_` must be 3-D sequence!')
for cur in new_params['input_size_']:
if not isinstance(cur, int):
raise ValueError('`new_params` is wrong! Each input data size `input_size_` must be '
'positive integer number!')
if cur <= 0:
raise ValueError('`new_params` is wrong! Each input data size `input_size_` must be '
'positive integer number!')
if (not isinstance(new_params['classes_list_'], list)) and \
(not isinstance(new_params['classes_list_'], tuple)):
raise ValueError('`new_params` is wrong! The classes list `classes_list_` must be list or tuple!')
if len(new_params['classes_list_']) < 2:
raise ValueError('`new_params` is wrong! The classes list `classes_list_` must consist from '
'two or more classes!')
self.random_state = check_random_state(self.random_state)
self.loss_value_ = new_params['loss_value_']
self.n_iter_ = new_params['n_iter_']
self.classes_list_ = copy.copy(new_params['classes_list_'])
self.input_size_ = copy.copy(new_params['input_size_'])
cnn_input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
self.cnn_, _ = self.build_cnn(self.input_size_, len(self.classes_list_), cnn_input_var)
_, test_prediction = self.build_loss(len(self.classes_list_), target_var, self.cnn_, True)
lasagne.layers.set_all_param_values(self.cnn_, new_params['weights_and_biases'])
self.predict_fn_ = theano.function([cnn_input_var], test_prediction, allow_input_downcast=True)
return self
def __iterate_minibatches(self, inputs, targets, indices, shuffle=False):
""" Итерироваться "минибатчами" по датасету - входным примерам inputs и соответствующим меткам классов targets.
:param inputs: Входные примеры X.
:param targets: Метки классов y.
:param indices: Индексы интересных нам примеров, участвующих в итерировании.
:param shuffle: Булевый флажок, указывающий, итерироваться случайно или всё же последовательно.
:return Итератор (каждый элемент: "минибатч" из batch_size входных примеров и соответствующих им меток классов).
"""
for indices_in_batch in iterate(indices, self.batch_size, shuffle, self.random_state if shuffle else None):
yield inputs[indices_in_batch], targets[indices_in_batch]
def __iterate_minibatches_for_prediction(self, inputs):
""" Итерироваться "минибатчами" по входным примерам inputs.
:param inputs: Входные примеры X.
:return Итератор (каждый элемент: "минибатч" из batch_size входных примеров).
"""
for indices_in_batch in iterate(numpy.arange(0, inputs.shape[0], 1, numpy.int32), self.batch_size, False, None):
yield inputs[indices_in_batch]
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.load_all(self.dump_all())
return result
def __deepcopy__(self, memodict={}):
cls = self.__class__
result = cls.__new__(cls)
result.load_all(self.dump_all())
return result
def __getstate__(self):
""" Нужно для сериализации через pickle. """
return self.dump_all()
def __setstate__(self, state):
""" Нужно для десериализации через pickle. """
self.load_all(state)
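# --- Usage sketch (added for illustration; the classifier class name `CNNClassifier`
# is an assumption, since the class definition begins earlier in this file) ---
# The `layers` dict must follow the structure validated by check_params(): 'conv' is a
# list of (num_filters, (height, width)) pairs, 'pool' is a list of (height, width)
# pooling sizes of the same length, and 'dense' is a list of fully-connected layer sizes.
#
# example_layers = {
#     'conv': [(16, (3, 3)), (32, (3, 3))],
#     'pool': [(2, 2), (2, 2)],
#     'dense': [128, 64]
# }
# clf = CNNClassifier(layers=example_layers, dropout=0.5, learning_rate=1e-3,
#                     max_epochs_number=50, validation_fraction=0.1,
#                     epochs_before_stopping=5, batch_size=32, eval_metric='Logloss')
# clf.fit(X_train, y_train)      # X_train shape: (n_samples, n_maps, height, width)
# y_pred = clf.predict(X_test)   # integer class labels, one per test sample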
|
StarcoderdataPython
|
4977918
|
#!/usr/bin/env python3
############################################################
# Usage: piScanner.py
# Imports barcode and date from sshScript to create the filename
# for a picture that is taken from a Raspberry Pi camera.
# This script is used in conjunction with sshScript.
############################################################
import time
import picamera
import socket
# function to get the parameters (date and barcode) for the
# filename and returns them in a list
# Usage: List = getParam(0612,423423)
def getParam(date,barcode):
paramList = [date,barcode]
print("The date is " + str(paramList[0]))
print("The barcode is " + str(paramList[1]))
return paramList
# function to get the file name by taking an array
# that contains the parameters and formatting it
# usage: name = getFileName(List)
def getFileName(list):
paramList = list
# get the hostname of the server/pi
piName = socket.gethostname()
picName = piName + "_" + paramList[0] +"_"+ paramList[1] + ".jpg"
return picName
# function to take the picture and name it
# Usage: takePic("myPicture")
def takePic(name):
picName = name
picPath = "/home/pi/Desktop/local-folder/"+picName
# picPath local directory must be the same as line 1 in syncScript
# and picPath local-folder must exist in the Raspberry Pi
# desktop before running this script
with picamera.PiCamera() as camera:
# Camera resolution is set for an 8 megapixel camera.
# Modify as desired (e.g. 2592, 1944 for a 5 megapixel camera).
camera.resolution = (3280, 2464)
# Set ISO to the desired value
camera.iso = 300
# Wait for the automatic gain control to settle
time.sleep(2)
# Now fix the values
camera.shutter_speed = camera.exposure_speed
camera.exposure_mode = 'off'
g = camera.awb_gains
camera.awb_mode = 'off'
camera.awb_gains = g
# wait 1 second before taking the picture
time.sleep(1)
camera.capture(picPath, quality=100)
print("Picture Taken: " + picName + "\n")
# run this script only if the script is run directly.
if __name__ == "__main__":
from sys import argv
# get the necessary arguments that will be passed
# by the shell script
List = getParam(argv[1],argv[2])
filename = getFileName(List)
takePic(filename)
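# --- Invocation sketch (illustrative; the date and barcode values below are
# placeholders, normally supplied by sshScript) ---
# $ python3 piScanner.py 0612 423423
# Saves /home/pi/Desktop/local-folder/<hostname>_0612_423423.jpg on the Pi.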
|
StarcoderdataPython
|
6487040
|
from hazma.parameters import alpha_em, qe
from hazma.parameters import charged_pion_mass as mpi
from hazma.parameters import electron_mass as me
from hazma.parameters import muon_mass as mmu
from cmath import sqrt, log, pi
import numpy as np
class VectorMediatorFSR:
def __dnde_xx_to_v_to_ffg(self, egam, Q, f):
"""Return the fsr spectra for fermions from decay of vector mediator.
Computes the final state radiaton spectrum value dNdE from a vector
mediator given a gamma ray energy of `egam`, center of mass energy `Q`
and final state fermion `f`.
Paramaters
----------
egam : float
Gamma ray energy.
Q: float
Center of mass energy of mass of off-shell vector mediator.
f : float
Name of the final state fermion: "e" or "mu".
Returns
-------
spec_val : float
Spectrum value dNdE from vector mediator.
"""
if f == "e":
mf = me
elif f == "mu":
mf = mmu
mu_l = mf / Q
x = 2 * egam / Q
ret_val = 0.0
if (
4.0 * mf ** 2 <= (Q ** 2 - 2.0 * Q * egam) <= Q ** 2
and Q > 2.0 * mf
and Q > 2.0 * self.mx
):
val = ((
2
* alpha_em
* (
-(
sqrt(1 + (4 * mu_l ** 2) / (-1 + x))
* (2 - 2 * x + x ** 2 - 4 * (-1 + x) * mu_l ** 2)
)
+ (2 + (-2 + x) * x - 4 * x * mu_l ** 2 - 8 * mu_l ** 4)
* log(
-(
(1 + sqrt(1 + (4 * mu_l ** 2) / (-1 + x)))
/ (-1 + sqrt(1 + (4 * mu_l ** 2) / (-1 + x)))
)
)
)
) / (pi * Q * x * sqrt(1 - 4 * mu_l ** 2) * (1 + 2 * mu_l **
2))).real
assert val >= 0
return val
else:
return 0.0
def dnde_xx_to_v_to_ffg(self, egam, Q, f):
"""Return the fsr spectra for fermions from decay of vector mediator.
Computes the final state radiaton spectrum value dNdE from a vector
mediator given a gamma ray energy of `egam`, center of mass energy `Q`
and final state fermion `f`.
Paramaters
----------
egam : float
Gamma ray energy.
Q: float
Center of mass energy of mass of off-shell vector mediator.
f : float
Mass of the final state fermion.
Returns
-------
spec_val : float
Spectrum value dNdE from vector mediator.
"""
if hasattr(egam, "__len__"):
return np.array([self.__dnde_xx_to_v_to_ffg(e, Q, f) for e in egam])
else:
return self.__dnde_xx_to_v_to_ffg(egam, Q, f)
def __dnde_xx_to_v_to_pipig(self, egam, Q):
"""Unvectorized dnde_xx_to_v_to_pipig"""
mx = self.mx
mu_pi = mpi / Q
x = 2.0 * egam / Q
x_min = 0.0
x_max = 1 - 4.0 * mu_pi ** 2
if x < x_min or x > x_max or Q < 2 * mpi or Q < 2.0 * mx:
return 0.0
else:
val = ((
4
* alpha_em
* (
sqrt(1 + (4 * mu_pi ** 2) / (-1 + x))
* (-1 + x + x ** 2 - 4 * (-1 + x) * mu_pi ** 2)
+ (-1 + x + 2 * mu_pi ** 2)
* (-1 + 4 * mu_pi ** 2)
* log(
-(
(1 + sqrt(1 + (4 * mu_pi ** 2) / (-1 + x)))
/ (-1 + sqrt(1 + (4 * mu_pi ** 2) / (-1 + x)))
)
)
)
) / (pi * Q * x * (1 - 4 * mu_pi ** 2) ** 1.5)).real
assert val >= 0
return val
def dnde_xx_to_v_to_pipig(self, eng_gams, Q):
"""
Returns the gamma ray energy spectrum for two fermions annihilating
into two charged pions and a photon.
Parameters
----------
eng_gams : numpy.ndarray or double
Gamma ray energy.
Q : double
Center of mass energy, or sqrt((ppip + ppim + pg)^2).
Returns
-------
Returns gamma ray energy spectrum for
:math:`\chi\bar{\chi}\to\pi^{+}\pi^{-}\gamma` evaluated at the gamma
ray energy(ies).
"""
if hasattr(eng_gams, "__len__"):
return np.array(
[self.__dnde_xx_to_v_to_pipig(eng_gam, Q) for eng_gam in eng_gams]
)
else:
return self.__dnde_xx_to_v_to_pipig(eng_gams, Q)
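# --- Usage sketch (assumption: VectorMediatorFSR is mixed into a model class that
# defines `self.mx`, the dark matter mass, as the methods above require) ---
#
# import numpy as np
# egams = np.logspace(0.0, 3.0, 100)                    # photon energies
# Q = 1000.0                                            # center of mass energy
# spec_e = model.dnde_xx_to_v_to_ffg(egams, Q, "e")     # e+ e- gamma FSR spectrum
# spec_pi = model.dnde_xx_to_v_to_pipig(egams, Q)       # pi+ pi- gamma FSR spectrum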
|
StarcoderdataPython
|
11281250
|
from pygccxml import declarations
from pybindx.writers import base_writer
class CppConsturctorWrapperWriter(base_writer.CppBaseWrapperWriter):
"""
Manage addition of constructor wrapper code
"""
def __init__(self, class_info,
ctor_decl,
class_decl,
wrapper_templates,
class_short_name=None):
super(CppConsturctorWrapperWriter, self).__init__(wrapper_templates)
self.class_info = class_info
self.ctor_decl = ctor_decl
self.class_decl = class_decl
self.class_short_name = class_short_name
if self.class_short_name is None:
self.class_short_name = self.class_decl.name
def exclusion_critera(self):
# Check for exclusions
exclusion_args = self.class_info.hierarchy_attribute_gather('calldef_excludes')
ctor_arg_exludes = self.class_info.hierarchy_attribute_gather('constructor_arg_type_excludes')
for eachArg in self.ctor_decl.argument_types:
if eachArg.decl_string.replace(" ", "") in exclusion_args:
return True
for eachExclude in ctor_arg_exludes:
if eachExclude in eachArg.decl_string:
return True
for eachArg in self.ctor_decl.argument_types:
if "iterator" in eachArg.decl_string.lower():
return True
if self.ctor_decl.parent != self.class_decl:
return True
if self.ctor_decl.is_artificial and declarations.is_copy_constructor(self.ctor_decl):
return True
if self.class_decl.is_abstract and len(self.class_decl.recursive_bases)>0:
if any(t.related_class.is_abstract for t in self.class_decl.recursive_bases):
return True
return False
def add_self(self, output_string):
if self.exclusion_critera():
return output_string
output_string += " "*8 + '.def(py::init<'
num_arg_types = len(self.ctor_decl.argument_types)
for idx, eachArg in enumerate(self.ctor_decl.argument_types):
output_string += eachArg.decl_string
if idx < num_arg_types-1:
output_string += ", "
output_string += ' >()'
default_args = ""
if not self.default_arg_exclusion_criteria():
for eachArg in self.ctor_decl.arguments:
default_args += ', py::arg("{}")'.format(eachArg.name)
if eachArg.default_value is not None:
default_args += ' = ' + eachArg.default_value
output_string += default_args + ')\n'
return output_string
|
StarcoderdataPython
|
341715
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
"""
Instantiate a simple mutable record using raw data
"""
def test():
import pyre.records
class record(pyre.records.record):
"""
A sample record
"""
sku = pyre.records.measure()
description = pyre.records.measure()
cost = pyre.records.measure()
# build a record
r = record.pyre_mutable(data=("9-4013", "organic kiwi", .85))
# check
assert r.sku == "9-4013"
assert r.description == "organic kiwi"
assert r.cost == .85
# make a change to the cost
r.cost = 1
# and verify that it was stored
assert r.cost == 1
# all done
return r
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
|
StarcoderdataPython
|
6579762
|
<filename>model/layers.py
"""Creates the layers for a BiMPM model architecture."""
import torch
import torch.nn as nn
import torch.nn.functional as F
class CharacterRepresentationEncoder(nn.Module):
"""A character embedding layer with embeddings that are learned along
with other network parameters during training.
"""
def __init__(self, args):
"""Initialize the character embedding layer model architecture, and
the char rnn.
Parameters
----------
args : Args
An object with all arguments for BiMPM model.
"""
super(CharacterRepresentationEncoder, self).__init__()
self.char_hidden_size = args.char_hidden_size
self.char_encoder = nn.Embedding(
args.char_vocab_size, args.char_input_size, padding_idx=0)
self.lstm = nn.LSTM(
input_size=args.char_input_size,
hidden_size=args.char_hidden_size,
num_layers=1,
bidirectional=False,
batch_first=True)
def forward(self, chars):
"""Defines forward pass computations flowing from inputs to
outputs in the network.
Parameters
----------
chars : Tensor
A PyTorch Tensor with shape (batch_size, seq_len, max_word_len)
Returns
-------
Tensor
A PyTorch Tensor with shape (batch_size, seq_len,
char_hidden_size).
"""
batch_size, seq_len, max_word_len = chars.size()
chars = chars.view(batch_size * seq_len, max_word_len)
# out_shape: (1, batch_size * seq_len, char_hidden_size)
chars = self.lstm(self.char_encoder(chars))[-1][0]
return chars.view(-1, seq_len, self.char_hidden_size)
class WordRepresentationLayer(nn.Module):
"""A word representation layer which will create word and char embeddings
which will then be concatenated and trained with other model parameters.
"""
def __init__(self, args, model_data):
"""Initialize the word representation layer, and store pre-trained
embeddings. Also initialize the char rnn.
Parameters
----------
args : Args
An object with all arguments for BiMPM model.
model_data : {Quora, SNLI}
A data loading object which returns word vectors and sentences.
"""
super(WordRepresentationLayer, self).__init__()
self.drop = args.dropout
self.word_encoder = nn.Embedding(args.word_vocab_size, args.word_dim)
self.word_encoder.weight.data.copy_(model_data.TEXT.vocab.vectors)
self.word_encoder.weight.requires_grad = False # Freeze parameters
self.char_encoder = CharacterRepresentationEncoder(args)
def dropout(self, tensor):
"""Defines a dropout function to regularize the parameters.
Parameters
----------
tensor : Tensor
A Pytorch Tensor.
Returns
-------
Tensor
A PyTorch Tensor with same size as input.
"""
return F.dropout(tensor, p=self.drop, training=self.training)
def forward(self, p):
"""Defines forward pass computations flowing from inputs to
outputs in the network.
Parameters
----------
p : Sentence
A sentence object with chars and word batches.
Returns
-------
Tensor
A PyTorch Tensor with size (batch_size, seq_len,
word_dim + char_hidden_size).
"""
words = self.word_encoder(p['words'])
chars = self.char_encoder(p['chars'])
p = torch.cat([words, chars], dim=-1)
return self.dropout(p)
class ContextRepresentationLayer(nn.Module):
"""A context representation layer to incorporate contextual information
into the representation of each time step of p and q.
"""
def __init__(self, args):
"""Initialize the context representation layer, and initialize an
lstm.
Parameters
----------
args : Args
An object with all arguments for BiMPM model.
"""
super(ContextRepresentationLayer, self).__init__()
self.drop = args.dropout
self.input_size = args.word_dim + args.char_hidden_size
self.lstm = nn.LSTM(
input_size=self.input_size,
hidden_size=args.hidden_size,
num_layers=1,
bidirectional=True,
batch_first=True)
def dropout(self, tensor):
"""Defines a dropout function to regularize the parameters.
Parameters
----------
tensor : Tensor
A Pytorch Tensor.
Returns
-------
Tensor
A PyTorch Tensor with same size as input.
"""
return F.dropout(tensor, p=self.drop, training=self.training)
def forward(self, p):
"""Defines forward pass computations flowing from inputs to
outputs in the network.
Parameters
----------
p : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
word_dim + char_hidden_size).
Returns
-------
Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes)
"""
p = self.lstm(p)[0]
return self.dropout(p)
class MatchingLayer(nn.Module):
"""A matching layer to compare contextual embeddings from one sentence
against the contextual embeddings of the other sentence.
"""
def __init__(self, args):
"""Initialize the mactching layer architecture.
Parameters
----------
args : Args
An object with all arguments for BiMPM model.
"""
super(MatchingLayer, self).__init__()
self.drop = args.dropout
self.hidden_size = args.hidden_size
self.l = args.num_perspectives
self.W = nn.ParameterList([
nn.Parameter(torch.rand(self.l, self.hidden_size))
for _ in range(8)
])
def dropout(self, tensor):
"""Defines a dropout function to regularize the parameters.
Parameters
----------
tensor : Tensor
A Pytorch Tensor.
Returns
-------
Tensor
A PyTorch Tensor with same size as input.
"""
return F.dropout(tensor, p=self.drop, training=self.training)
def cat(self, *args):
"""Concatenate matching vectors.
Parameters
----------
*args
Variable length argument list.
Returns
-------
Tensor
A PyTorch Tensor with input tensors concatenated over dim 2.
"""
return torch.cat(list(args), dim=2) # dim 2 is num_perspectives
def split(self, tensor, direction='fw'):
"""Split the output of an bidirectional rnn into forward or
backward passes.
Parameters
----------
tensor : Tensor
A Pytorch Tensor containing the output of a bidirectional rnn.
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
Returns
-------
Tensor
A Pytorch Tensor for the rnn pass in the specified direction.
"""
if direction == 'fw':
return torch.split(tensor, self.hidden_size, dim=-1)[0]
elif direction == 'bw':
return torch.split(tensor, self.hidden_size, dim=-1)[-1]
def match(self,
p,
q,
w,
direction='fw',
split=True,
stack=True,
cosine=False):
"""Match two sentences based on various matching strategies and
time-step constraints.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes) if split is True, else it is size
(batch_size, seq_len, hidden_size).
w : Parameter
A Pytorch Parameter with size (num_perspectives, hidden_size).
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
split : bool, optional
Split input Tensor if output from bidirectional rnn
(default is True).
stack : bool, optional
Stack input Tensor if input size is (batch_size, hidden_size),
for the second sentence `q` for example, in the case of the
full-matching strategy, when matching only the last time-step
(default is True).
cosine : bool, optional
Perform cosine similarity using built-in PyTorch Function, for
example, in the case of a full-matching or attentive-matching
strategy (default is False).
Returns
-------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len, l) with the
weights multiplied by the input sentence.
Tensor
If cosine=True, returns a tensor of size (batch_size, seq_len, l)
representing the distance between `p` and `q`.
"""
if split:
p = self.split(p, direction)
q = self.split(q, direction)
if stack:
seq_len = p.size(1)
# out_shape: (batch_size, seq_len_p, hidden_size)
if direction == 'fw':
q = torch.stack([q[:, -1, :]] * seq_len, dim=1)
elif direction == 'bw':
q = torch.stack([q[:, 0, :]] * seq_len, dim=1)
# out_shape: (1, l, 1, hidden_size)
w = w.unsqueeze(0).unsqueeze(2)
# out_shape: (batch_size, l, seq_len_{p, q}, hidden_size)
p = w * torch.stack([p] * self.l, dim=1)
q = w * torch.stack([q] * self.l, dim=1)
if cosine:
# out_shape: (batch_size, seq_len, l)
return F.cosine_similarity(p, q, dim=-1).permute(0, 2, 1)
return (p, q)
def attention(self, p, q, w, direction='fw', att='mean'):
"""Create either a mean or max attention vector for the attentive
matching strategies.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
w : Parameter
A Pytorch Parameter with size (num_perspectives, hidden_size).
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
att : str, optional
The type of attention vector to generate (default is 'mean').
Returns
-------
att_p_match, att_q_match : Tensor
A PyTorch Tensor with size (batch_size, seq_len, hidden_size).
"""
# out_shape: (batch_size, seq_len_{p, q}, hidden_size)
p = self.split(p, direction)
q = self.split(q, direction)
# out_shape: (batch_size, seq_len_p, 1)
p_norm = p.norm(p=2, dim=2, keepdim=True)
# out_shape: (batch_size, 1, seq_len_q)
q_norm = q.norm(p=2, dim=2, keepdim=True).permute(0, 2, 1)
# out_shape: (batch_size, seq_len_p, seq_len_q)
dot = torch.bmm(p, q.permute(0, 2, 1))
magnitude = p_norm * q_norm
cosine = dot / magnitude
# out_shape: (batch_size, seq_len_p, seq_len_q, hidden_size)
weighted_p = p.unsqueeze(2) * cosine.unsqueeze(-1)
weighted_q = q.unsqueeze(1) * cosine.unsqueeze(-1)
if att == 'mean':
# out_shape: (batch_size, seq_len_{q, p}, hidden_size))
p_vec = weighted_p.sum(dim=1) /\
cosine.sum(dim=1, keepdim=True).permute(0, 2, 1)
q_vec = weighted_q.sum(dim=2) / cosine.sum(dim=2, keepdim=True)
elif att == 'max':
# out_shape: (batch_size, seq_len_{q, p}, hidden_size)
p_vec, _ = weighted_p.max(dim=1)
q_vec, _ = weighted_q.max(dim=2)
# out_shape: (batch_size, seq_len_{p, q}, l)
att_p_match = self.match(
p, q_vec, w, split=False, stack=False, cosine=True)
att_q_match = self.match(
q, p_vec, w, split=False, stack=False, cosine=True)
return (att_p_match, att_q_match)
def full_match(self, p, q, w, direction='fw'):
"""Match each contextual embedding with the last time-step of the other
sentence for either the forward or backward pass.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
w : Parameter
A Pytorch Parameter with size (num_perspectives, hidden_size).
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
Returns
-------
Tensor
A PyTorch Tensor with size (batch_size, seq_len, l).
"""
# out_shape: (batch_size, seq_len_{p, q}, l)
return self.match(
p, q, w, direction, split=True, stack=True, cosine=True)
def maxpool_match(self, p, q, w, direction='fw'):
"""Match each contextual embedding with each time-step of the other
sentence for either the forward or backward pass.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
w : Parameter
A Pytorch Parameter with size (num_perspectives, hidden_size).
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
Returns
-------
pool_p, pool_q : array_like
A tuple of PyTorch Tensors with size (batch_size, seq_len, l).
"""
# out_shape: (batch_size, l, seq_len_{p, q}, hidden_size)
p, q = self.match(
p, q, w, direction, split=True, stack=False, cosine=False)
# out_shape: (batch_size, l, seq_len_{p, q}, 1)
p_norm = p.norm(p=2, dim=-1, keepdim=True)
q_norm = q.norm(p=2, dim=-1, keepdim=True)
# out_shape: (batch_size, l, seq_len_p, seq_len_q)
dot = torch.matmul(p, q.permute(0, 1, 3, 2))
magnitude = p_norm * q_norm.permute(0, 1, 3, 2)
# out_shape: (batch_size, seq_len_p, seq_len_q, l)
cosine = (dot / magnitude).permute(0, 2, 3, 1)
# out_shape: (batch_size, seq_len_{p, q}, l)
pool_p, _ = cosine.max(dim=2)
pool_q, _ = cosine.max(dim=1)
return (pool_p, pool_q)
def attentive_match(self, p, q, w, direction='fw'):
"""Match each contextual embedding with its mean attentive vector.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
w : Parameter
A Pytorch Parameter with size (num_perspectives, hidden_size).
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
Returns
-------
array_like
A tuple of PyTorch Tensors with size (batch_size, seq_len, l).
"""
# out_shape: (batch_size, seq_len_{p, q}, l)
return self.attention(p, q, w, direction, att='mean')
def max_attentive_match(self, p, q, w, direction='fw'):
"""Match each contextual embedding with its max attentive vector.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
w : Parameter
A Pytorch Parameter with size (num_perspectives, hidden_size).
direction : str, optional
The direction of the rnn pass to return (default is 'fw').
Returns
-------
array_like
A tuple of PyTorch Tensors with size (batch_size, seq_len, l).
"""
# out_shape: (batch_size, seq_len_{p, q}, l)
return self.attention(p, q, w, direction, att='max')
def match_operation(self, p, q, W):
"""Match each contextual embedding with its attentive vector.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
W : ParameterList
A list of Pytorch Parameters with size
(num_perspectives, hidden_size).
Returns
-------
array_like
A list of PyTorch Tensors of size (batch_size, seq_len, l*8).
"""
full_p2q_fw = self.full_match(p, q, W[0], 'fw')
full_p2q_bw = self.full_match(p, q, W[1], 'bw')
full_q2p_fw = self.full_match(q, p, W[0], 'fw')
full_q2p_bw = self.full_match(q, p, W[1], 'bw')
pool_p_fw, pool_q_fw = self.maxpool_match(p, q, W[2], 'fw')
pool_p_bw, pool_q_bw = self.maxpool_match(p, q, W[3], 'bw')
att_p2mean_fw, att_q2mean_fw = self.attentive_match(p, q, W[4], 'fw')
att_p2mean_bw, att_q2mean_bw = self.attentive_match(p, q, W[5], 'bw')
att_p2max_fw, att_q2max_fw = self.max_attentive_match(p, q, W[6], 'fw')
att_p2max_bw, att_q2max_bw = self.max_attentive_match(p, q, W[7], 'bw')
# Concatenate all the vectors for each sentence
p_vec = self.cat(full_p2q_fw, pool_p_fw, att_p2mean_fw, att_p2max_fw,
full_p2q_bw, pool_p_bw, att_p2mean_bw, att_p2max_bw)
q_vec = self.cat(full_q2p_fw, pool_q_fw, att_q2mean_fw, att_q2max_fw,
full_q2p_bw, pool_q_bw, att_q2mean_bw, att_q2max_bw)
# out_shape: (batch_size, seq_len_{p, q}, l*8)
return (self.dropout(p_vec), self.dropout(q_vec))
def forward(self, p, q):
"""Defines forward pass computations flowing from inputs to
outputs in the network.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len,
hidden_size, num_passes).
Returns
-------
array_like
A list of PyTorch Tensors of size (batch_size, seq_len, l*8).
"""
return self.match_operation(p, q, self.W)
class AggregationLayer(nn.Module):
"""An aggregation layer to combine two sequences of matching vectors into
fixed-length matching vector.
"""
def __init__(self, args):
"""Initialize the aggregation layer architecture.
Parameters
----------
args : Args
An object with all arguments for BiMPM model.
"""
super(AggregationLayer, self).__init__()
self.hidden_size = args.hidden_size
self.drop = args.dropout
self.lstm = nn.LSTM(
input_size=args.num_perspectives * 8,
hidden_size=args.hidden_size,
num_layers=1,
bidirectional=True,
batch_first=True)
def dropout(self, tensor):
"""Defines a dropout function to regularize the parameters.
Parameters
----------
tensor : Tensor
A Pytorch Tensor.
Returns
-------
Tensor
A PyTorch Tensor with same size as input.
"""
return F.dropout(tensor, p=self.drop, training=self.training)
def forward(self, p, q):
"""Defines forward pass computations flowing from inputs to
outputs in the network.
Parameters
----------
p, q : Tensor
A PyTorch Tensor with size (batch_size, seq_len, l*8).
Returns
-------
Tensor
A PyTorch Tensor of size (batch_size, hidden_size*4).
"""
# out_shape: (2, batch_size, hidden_size)
p = self.lstm(p)[-1][0]
q = self.lstm(q)[-1][0]
# out_shape: (batch_size, hidden_size*4)
x = torch.cat([
p.permute(1, 0, 2).contiguous().view(-1, self.hidden_size * 2),
q.permute(1, 0, 2).contiguous().view(-1, self.hidden_size * 2)
],
dim=1)
return self.dropout(x)
class PredictionLayer(nn.Module):
"""An prediction layer to evaluate the probability distribution for a class
given the two sentences. The number of outputs would change based on task.
"""
def __init__(self, args):
"""Initialize the prediction layer architecture.
Parameters
----------
args : Args
An object with all arguments for BiMPM model.
"""
super(PredictionLayer, self).__init__()
self.drop = args.dropout
self.hidden_layer = nn.Linear(args.hidden_size * 4,
args.hidden_size * 2)
self.output_layer = nn.Linear(args.hidden_size * 2, args.class_size)
def dropout(self, tensor):
"""Defines a dropout function to regularize the parameters.
Parameters
----------
tensor : Tensor
A Pytorch Tensor.
Returns
-------
Tensor
A PyTorch Tensor with same size as input.
"""
return F.dropout(tensor, p=self.drop, training=self.training)
def forward(self, match_vec):
"""Defines forward pass computations flowing from inputs to
outputs in the network.
Parameters
----------
match_vec : Tensor
A PyTorch Tensor of size (batch_size, hidden_size*4).
Returns
-------
Tensor
A PyTorch Tensor of size (batch_size, class_size).
"""
x = torch.tanh(self.hidden_layer(match_vec))
return self.output_layer(self.dropout(x))
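# --- Hedged usage sketch (not part of the original module) ---
# Wires AggregationLayer and PredictionLayer together on random matching
# vectors to show the expected shapes; the SimpleNamespace below is only a
# stand-in for the project's real `args` object, and the sizes are illustrative.
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(hidden_size=100, dropout=0.1,
                           num_perspectives=20, class_size=2)
    agg = AggregationLayer(args)
    pred = PredictionLayer(args)
    # Matching vectors for two sentences: (batch_size, seq_len, l*8)
    p = torch.rand(4, 15, args.num_perspectives * 8)
    q = torch.rand(4, 12, args.num_perspectives * 8)
    logits = pred(agg(p, q))
    print(logits.shape)  # expected: torch.Size([4, 2])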
|
StarcoderdataPython
|
12821962
|
<reponame>CitrineInformatics/pypif-sdk
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pypif_sdk.func import *
from pypif_sdk.func.calculate_funcs import _expand_formula_, _expand_hydrate_, _create_compositional_array_, _consolidate_elemental_array_, _calculate_n_atoms_, _add_ideal_atomic_weights_, _add_ideal_weight_percent_, _create_emprical_compositional_array_, _add_atomic_percents_, _get_element_in_pif_composition_
from pypif.obj import *
import json
from pypif.pif import dumps  # used below to serialize pif objects for comparison
test_pif_one = ChemicalSystem(
names=["nonanal"],
chemical_formula="C9H18O",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_two = ChemicalSystem(
names=["nonanal"],
chemical_formula="C6H10OC3H8",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_three = ChemicalSystem(
names=["parenth nonanal"],
chemical_formula="CH3(CH2)7OHC",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_four = ChemicalSystem(
names=["copper(II) nitrate trihydrate"],
chemical_formula="Cu(NO3)2 · 3H2O",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_five = ChemicalSystem(
names=["ammonium hexathiocyanoplatinate(IV)"],
chemical_formula="(NH4)2[Pt(SCN)6]",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_six = ChemicalSystem(
names=["ammonium hexathiocyanoplatinate(IV)"],
chemical_formula="(NH$_{4}$)$_{2}$[Pt(SCN)$_6$]",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_seven = ChemicalSystem(
names=["calcium sulfate hemihydrate"],
chemical_formula="CaSO4·0.5H2O",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_eight = ChemicalSystem(
names=["Zr glass"],
chemical_formula="Zr46.75Ti8.25Cu7.5Ni10Be27.5",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])]
)
test_pif_nine = ChemicalSystem(
names=["Zr glass"],
chemical_formula="Zr46.75Ti8.25Cu7.5Ni10Be27.5",
properties=[Property(name="foo", scalars=[Scalar(value="bar")])],
composition=[Composition(element="Zirconium")]
)
def test_calculate_ideal_atomic_percent():
"""
Tests that calculate_ideal_atomic_percent() works under a variety of circumstances
"""
pif_one = calculate_ideal_atomic_percent(test_pif_one)
assert round(pif_one.composition[0].idealAtomicPercent, 2) == 32.14
assert round(pif_one.composition[1].idealAtomicPercent, 2) == 64.29
assert round(pif_one.composition[2].idealAtomicPercent, 2) == 3.57
pif_two = calculate_ideal_atomic_percent(test_pif_two)
assert len(pif_two.composition) == 3
assert round(pif_two.composition[0].idealAtomicPercent, 2) == 32.14
assert round(pif_two.composition[1].idealAtomicPercent, 2) == 64.29
assert round(pif_two.composition[2].idealAtomicPercent, 2) == 3.57
pif_three = calculate_ideal_atomic_percent(test_pif_three)
assert len(pif_three.composition) == 3
assert round(pif_three.composition[0].idealAtomicPercent, 2) == 32.14
assert round(pif_three.composition[1].idealAtomicPercent, 2) == 64.29
assert round(pif_three.composition[2].idealAtomicPercent, 2) == 3.57
pif_four = calculate_ideal_atomic_percent(test_pif_four)
assert len(pif_four.composition) == 4
assert round(pif_four.composition[0].idealAtomicPercent, 2) == 5.56
assert round(pif_four.composition[1].idealAtomicPercent, 2) == 11.11
assert round(pif_four.composition[2].idealAtomicPercent, 2) == 50.00
assert round(pif_four.composition[3].idealAtomicPercent, 2) == 33.33
pif_five = calculate_ideal_atomic_percent(test_pif_five)
assert len(pif_five.composition) == 5
assert round(pif_five.composition[0].idealAtomicPercent, 2) == 27.59
assert round(pif_five.composition[1].idealAtomicPercent, 2) == 27.59
assert round(pif_five.composition[2].idealAtomicPercent, 2) == 3.45
assert round(pif_five.composition[3].idealAtomicPercent, 2) == 20.69
assert round(pif_five.composition[4].idealAtomicPercent, 2) == 20.69
pif_six = calculate_ideal_atomic_percent(test_pif_six)
assert len(pif_six.composition) == 5
assert round(pif_six.composition[0].idealAtomicPercent, 2) == 27.59
assert round(pif_six.composition[1].idealAtomicPercent, 2) == 27.59
assert round(pif_six.composition[2].idealAtomicPercent, 2) == 3.45
assert round(pif_six.composition[3].idealAtomicPercent, 2) == 20.69
assert round(pif_six.composition[4].idealAtomicPercent, 2) == 20.69
pif_seven = calculate_ideal_atomic_percent(test_pif_seven)
assert len(pif_seven.composition) == 4
assert round(pif_seven.composition[0].idealAtomicPercent, 2) == 13.33
assert round(pif_seven.composition[1].idealAtomicPercent, 2) == 13.33
assert round(pif_seven.composition[2].idealAtomicPercent, 2) == 60.00
assert round(pif_seven.composition[3].idealAtomicPercent, 2) == 13.33
pif_eight = calculate_ideal_atomic_percent(test_pif_eight)
assert len(pif_eight.composition) == 5
assert round(pif_eight.composition[0].idealAtomicPercent, 2) == 46.75
assert round(pif_eight.composition[1].idealAtomicPercent, 2) == 8.25
assert round(pif_eight.composition[2].idealAtomicPercent, 2) == 7.5
assert round(pif_eight.composition[3].idealAtomicPercent, 2) == 10
assert round(pif_eight.composition[4].idealAtomicPercent, 2) == 27.5
def test_calculate_ideal_weight_percent():
"""
Tests that calculate_ideal_weight_percent() works under a variety of circumstances
"""
pif_one = calculate_ideal_weight_percent(test_pif_one)
assert len(pif_one.composition) == 3
assert round(pif_one.composition[0].idealWeightPercent, 2) == 76.00
assert round(pif_one.composition[1].idealWeightPercent, 2) == 12.76
assert round(pif_one.composition[2].idealWeightPercent, 2) == 11.25
pif_two = calculate_ideal_weight_percent(test_pif_two)
assert len(pif_two.composition) == 3
assert round(pif_two.composition[0].idealWeightPercent, 2) == 76.00
assert round(pif_two.composition[1].idealWeightPercent, 2) == 12.76
assert round(pif_two.composition[2].idealWeightPercent, 2) == 11.25
pif_three = calculate_ideal_weight_percent(test_pif_three)
assert len(pif_three.composition) == 3
assert round(pif_three.composition[0].idealWeightPercent, 2) == 76.00
assert round(pif_three.composition[1].idealWeightPercent, 2) == 12.76
assert round(pif_three.composition[2].idealWeightPercent, 2) == 11.25
pif_four = calculate_ideal_weight_percent(test_pif_four)
assert len(pif_four.composition) == 4
assert round(pif_four.composition[0].idealWeightPercent, 2) == 26.30
assert round(pif_four.composition[1].idealWeightPercent, 2) == 11.60
assert round(pif_four.composition[2].idealWeightPercent, 2) == 59.60
assert round(pif_four.composition[3].idealWeightPercent, 2) == 2.50
pif_five = calculate_ideal_weight_percent(test_pif_five)
assert len(pif_five.composition) == 5
assert round(pif_five.composition[0].idealWeightPercent, 2) == 19.33
assert round(pif_five.composition[1].idealWeightPercent, 2) == 1.39
assert round(pif_five.composition[2].idealWeightPercent, 2) == 33.66
assert round(pif_five.composition[3].idealWeightPercent, 2) == 33.19
assert round(pif_five.composition[4].idealWeightPercent, 2) == 12.43
pif_six = calculate_ideal_weight_percent(test_pif_six)
assert len(pif_six.composition) == 5
assert round(pif_six.composition[0].idealWeightPercent, 2) == 19.33
assert round(pif_six.composition[1].idealWeightPercent, 2) == 1.39
assert round(pif_six.composition[2].idealWeightPercent, 2) == 33.66
assert round(pif_six.composition[3].idealWeightPercent, 2) == 33.19
assert round(pif_six.composition[4].idealWeightPercent, 2) == 12.43
pif_seven = calculate_ideal_weight_percent(test_pif_seven)
assert len(pif_seven.composition) == 4
assert round(pif_seven.composition[0].idealWeightPercent, 2) == 27.61
assert round(pif_seven.composition[1].idealWeightPercent, 2) == 22.09
assert round(pif_seven.composition[2].idealWeightPercent, 2) == 49.60
assert round(pif_seven.composition[3].idealWeightPercent, 2) == 0.69
pif_eight = calculate_ideal_weight_percent(test_pif_eight)
assert len(pif_eight.composition) == 5
assert round(pif_eight.composition[0].idealWeightPercent, 2) == 71.42
assert round(pif_eight.composition[1].idealWeightPercent, 2) == 6.61
assert round(pif_eight.composition[2].idealWeightPercent, 2) == 7.98
assert round(pif_eight.composition[3].idealWeightPercent, 2) == 9.83
assert round(pif_eight.composition[4].idealWeightPercent, 2) == 4.15
def test_expand_formula_():
"""
Tests _expand_formula_() to ensure complex parentheses are handled correctly
"""
assert _expand_formula_("(NH4)2[Pt(SCN)6]") == "N2H8PtS6C6N6"
def test_expand_hydrate_():
"""
Tests _expand_hydrate_() to ensure hydrate chemical formulas can be properly expanded even with decimal values
"""
assert _expand_hydrate_(5, "CaSO4·0.5H2O") == "CaSO4HO0.5"
def test_create_compositional_array_():
"""
Tests that _create_compositional_array_() returns an array of compositions for both whole number and decimal values
"""
assert _create_compositional_array_("N2H8PtS6C6N6") == [
{"symbol": "N", "occurances": 2},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 6},
{"symbol": "N", "occurances": 6}]
assert _create_compositional_array_("CaSO4HO0.5") == [
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4},
{"symbol": "H", "occurances": 1},
{"symbol": "O", "occurances": 0.5}
]
def test_consolidate_elemental_array_():
"""
    Tests that _consolidate_elemental_array_() returns a consolidated array of compositions for both whole number and decimal values
"""
input_array = [
{"symbol": "N", "occurances": 2},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 6},
{"symbol": "N", "occurances": 6},
{"symbol": "C", "occurances": 2}
]
output_array = [
{"symbol": "N", "occurances": 8},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 8}
]
input_array_dec = [
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4},
{"symbol": "H", "occurances": 1},
{"symbol": "O", "occurances": 0.5}
]
output_array_dec = [
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4.5},
{"symbol": "H", "occurances": 1}
]
assert _consolidate_elemental_array_(input_array) == output_array
assert _consolidate_elemental_array_(input_array_dec) == output_array_dec
def test_calculate_n_atoms_():
"""
Tests that _calculate_n_atoms_ returns the correct value for both whole number and decimal values
"""
assert _calculate_n_atoms_([
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4},
{"symbol": "H", "occurances": 1},
{"symbol": "O", "occurances": 0.5}
]) == 7.5
def test_add_ideal_atomic_weights_():
"""
Tests that _add_ideal_atomic_weights_() returns a modified array
"""
input_array = [
{"symbol": "N", "occurances": 8},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 8}
]
output_array = [
{"symbol": "N", "occurances": 8, "weight": 14.007 * 8},
{"symbol": "H", "occurances": 8, "weight": 1.008 * 8},
{"symbol": "Pt", "occurances": 1, "weight": 195.084},
{"symbol": "S", "occurances": 6, "weight": 32.06 * 6},
{"symbol": "C", "occurances": 8, "weight": 12.011 * 8}
]
assert _add_ideal_atomic_weights_(input_array) == output_array
def test_add_ideal_weight_percent_():
"""
Tests that _add_ideal_weight_percent_() returns a modified array
"""
input_array = [
{"symbol": "N", "occurances": 8, "weight": 14.007 * 8},
{"symbol": "H", "occurances": 8, "weight": 1.008 * 8},
{"symbol": "Pt", "occurances": 1, "weight": 195.084},
{"symbol": "S", "occurances": 6, "weight": 32.06 * 6},
{"symbol": "C", "occurances": 8, "weight": 12.011 * 8}
]
output_array = [
{"symbol": "N", "occurances": 8, "weight": 14.007 * 8,
"weight_percent": 14.007 * 8 / 603.652 * 100},
{"symbol": "H", "occurances": 8, "weight": 1.008 * 8,
"weight_percent": 1.008 * 8 / 603.652 * 100},
{"symbol": "Pt", "occurances": 1, "weight": 195.084,
"weight_percent": 195.084 / 603.652 * 100},
{"symbol": "S", "occurances": 6, "weight": 32.06 * 6,
"weight_percent": 32.06 * 6 / 603.652 * 100},
{"symbol": "C", "occurances": 8, "weight": 12.011 * 8,
"weight_percent": 12.011 * 8 / 603.652 * 100}
]
assert _add_ideal_weight_percent_(input_array) == output_array
def test_create_emprical_compositional_array_():
"""
    Tests that _create_emprical_compositional_array_() returns an empirical array of elements
"""
assert _create_emprical_compositional_array_("CaSO4HO0.5") == [
{"symbol": "Ca", "occurances": 1},
{"symbol": "S", "occurances": 1},
{"symbol": "O", "occurances": 4.5},
{"symbol": "H", "occurances": 1}
]
def test_add_atomic_percents_():
"""
Tests that _add_atomic_percents_() returns a modified array
"""
input_array = [
{"symbol": "N", "occurances": 8},
{"symbol": "H", "occurances": 8},
{"symbol": "Pt", "occurances": 1},
{"symbol": "S", "occurances": 6},
{"symbol": "C", "occurances": 8}
]
output_array = [
{"symbol": "N", "occurances": 8, "atomic_percent": 8 / 31 * 100},
{"symbol": "H", "occurances": 8, "atomic_percent": 8 / 31 * 100},
{"symbol": "Pt", "occurances": 1, "atomic_percent": 1 / 31 * 100},
{"symbol": "S", "occurances": 6, "atomic_percent": 6 / 31 * 100},
{"symbol": "C", "occurances": 8, "atomic_percent": 8 / 31 * 100}
]
assert _add_atomic_percents_(input_array) == output_array
def test_get_element_in_pif_composition_():
"""
    Tests that _get_element_in_pif_composition_() can check for both element names and element symbols
"""
assert _get_element_in_pif_composition_(test_pif_eight, "Ca") == False
correct_comp = Composition(element="Zirconium")
assert dumps(_get_element_in_pif_composition_(
test_pif_nine, "Zr")) == dumps([correct_comp, 0])
|
StarcoderdataPython
|
12848069
|
from functools import wraps
from flask import current_app, request
from flask_restful import abort
def auth_simple_token(func):
@wraps(func)
def wrapper(*args, **kwargs):
token = request.headers.get('x-simple-auth')
if current_app.config['API_KEY'] == token:
return func(*args, **kwargs)
abort(401)
return wrapper
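# --- Hedged usage sketch (not part of the original module) ---
# A minimal Flask-RESTful resource protected by auth_simple_token; the app
# name, route, and API key value are illustrative assumptions.
if __name__ == '__main__':
    from flask import Flask
    from flask_restful import Api, Resource

    class Protected(Resource):
        # flask_restful applies these decorators to every HTTP method handler.
        method_decorators = [auth_simple_token]

        def get(self):
            return {'status': 'ok'}

    app = Flask(__name__)
    app.config['API_KEY'] = 'change-me'
    Api(app).add_resource(Protected, '/protected')
    # Clients must send an 'x-simple-auth: change-me' header to get a 200.
    app.run()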
|
StarcoderdataPython
|
3427959
|
import numpy as np
import keras
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv3D, MaxPooling3D
from keras import backend as K
def get_liveness_model():
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3),
activation='relu',
input_shape=(24,100,100,1)))
model.add(Conv3D(64, (3, 3, 3), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Conv3D(64, (3, 3, 3), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Conv3D(64, (3, 3, 3), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
return model
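# --- Hedged usage sketch (not part of the original module) ---
# Builds the model and runs a forward pass on a dummy clip of 24 grayscale
# 100x100 frames; the optimizer/loss choices are illustrative assumptions.
if __name__ == '__main__':
    model = get_liveness_model()
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    dummy_clip = np.zeros((1, 24, 100, 100, 1))
    print(model.predict(dummy_clip).shape)  # expected: (1, 2)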
|
StarcoderdataPython
|
8017872
|
<filename>machines/worker/code/home/management/commands/cache_movie_images.py
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Caches all the movie images available in the system for faster reloading at a later point'
def handle(self, *args, **options):
from home.models import Movie
import time
for m in Movie.objects.all():
            print("Loading assets for %s" % m.name)
m.get_poster_thumbnail_url()
m.get_poster_url()
#time.sleep(1)
self.stdout.write("Loaded all assets")
|
StarcoderdataPython
|
11214065
|
<reponame>farzanaaswin0708/Data-Science-Projects
#!/usr/bin/env python
import mnist
import numpy as np
import sys
"""
a simple nn classifier using squared Euclidean (L2) distance
"""
class nn_classifier:
def __init__(self, dataset):
self.images, self.labels = dataset[0], dataset[1];
"""
predict label of the given image
"""
def predict(self, image):
        min_dist, label = sys.maxsize, -1;
for (timage, tlabel) in zip(self.images, self.labels):
diff = timage.astype(int) - image.astype(int);
dist = sum(map(lambda x: x ** 2, diff));
if dist < min_dist:
min_dist, label = dist, tlabel;
return label;
"""
add sample to training set -- for prototype selector
"""
def addSample(self, image, label):
self.images.append(image);
self.labels.append(label);
if __name__ == "__main__":
train_dataset = mnist.read_dataset("train");
nc = nn_classifier(train_dataset);
images, labels = mnist.read_dataset("test");
    print(labels[10], nc.predict(images[10]))
|
StarcoderdataPython
|
12837854
|
# _*_ coding:utf-8 _*_
# Author: hungryboy
# @Time: 2021/1/28
# @File: class01.py
print("Welcome to hungryboy's Python world!")
def test():
    print("This is a function, outside the Demo class")
class Demo:
    print("this is a demo")
    def demo(self):
        print("This is a method of the Demo class!")
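# --- Hedged usage sketch ---
# Calls the module-level function and the Demo method defined above.
if __name__ == '__main__':
    test()
    Demo().demo()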
|
StarcoderdataPython
|
6595794
|
<gh_stars>1-10
import turtle
t = turtle.Pen()
def settings(width=1, speed=2, pencolor='red', fillcolor='yellow'):
t.width(width)
t.speed(speed)
t.pencolor(pencolor)
t.fillcolor(fillcolor)
def square(a, pen='blue', fill='green'):
t.fillcolor(fill)
t.pencolor(pen)
t.begin_fill()
for i in range(4):
t.forward(a)
t.left(90)
t.end_fill()
settings(speed=6, width=4, )
square(30, 'orange', 'blue')
t.up()
t.forward(100)
t.down()
square(40)
t.right(90)
t.up()
t.forward(100)
t.down()
square(60, '#47cc9b', '#fc0f96')
turtle.Screen().exitonclick()
|
StarcoderdataPython
|
1921250
|
import torch
import time
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from torch.utils.data import Dataset
import torch.autograd.functional as AGF
from pytorch_lightning.callbacks import EarlyStopping
import torch.linalg as linalg
from pytorch_lightning import loggers as pl_loggers
import matplotlib.pyplot as plt
from torchdiffeq import odeint
import numpy as np
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
global_max_epoches = 1000
global_train_flag = True
global_data_size = 6
global_optimizer_lr = 5e-2
torch.manual_seed(55)
class TrainDataset(Dataset):
def __init__(self, dataSize):
MyData = torch.zeros(dataSize, 2)
MyLabel = torch.zeros(dataSize, 1)
for i in range(int(dataSize/2)):
r = 2
theta = torch.tensor(i/int(dataSize/2)*1*3.14)
MyLabel[i, :] = 0
MyData[i, :] = torch.tensor(
[r*torch.cos(theta), r*torch.sin(theta)])
plt.plot(MyData[i, 0], MyData[i, 1], 'bx',markersize=12)
MyLabel[i+int(dataSize/2), :] = 1
MyData[i+int(dataSize/2), :] = torch.tensor(
[(r+2)*torch.cos(theta), (r+2)*torch.sin(theta)])
plt.plot(MyData[i+int(dataSize/2), 0],
MyData[i+int(dataSize/2), 1], 'ro', markersize=12)
number_of_samples = 50
radius = 0.3
angle = 2*3.14*torch.linspace(0, 1, number_of_samples)
for i in range(0, len(MyData), 1):
sample = MyData[i]+torch.stack([radius*torch.cos(angle),
radius*torch.sin(angle)]).T
randindex = torch.randperm(dataSize)
self.MyData = MyData[randindex, :]
self.MyLabel = MyLabel[randindex, :]
def __len__(self):
return len(self.MyData)
def __getitem__(self, idx):
return self.MyData[idx, :], self.MyLabel[idx, :]
class classification_layer(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(2, 1)
def forward(self, x):
x = self.fc(x)
return x
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.net = nn.Sequential(
nn.Linear(2, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 2),
)
def forward(self, t, x):
return self.net(x)
class swiss_roll_node(pl.LightningModule):
def __init__(self):
super().__init__()
self.t = torch.linspace(0., 0.5, 10)
self.func = ODEFunc()
self.classification_layer = classification_layer()
def node_propagation(self, x0):
# traj_x = odeint(self.func, x0, t, method='dopri5')
traj_x = odeint(self.func, x0, self.t, method='euler')
return traj_x
def forward(self, x0):
        # the final value of the node propagation is fed to the classification layer and then the sigmoid
prediction_probability = torch.sigmoid(
self.classification_layer(self.node_propagation(x0)[-1]))
        # output is in the range of (0, 1)
return prediction_probability
def configure_optimizers(self):
optimizer1 = torch.optim.Adam(
self.func.parameters(), lr=global_optimizer_lr)
optimizer2 = torch.optim.Adam(
self.classification_layer.parameters(), lr=global_optimizer_lr)
def lambda1(epoch): return 0.99 ** epoch
scheduler1 = torch.optim.lr_scheduler.LambdaLR(
optimizer1, lr_lambda=lambda1)
scheduler2 = torch.optim.lr_scheduler.LambdaLR(
optimizer2, lr_lambda=lambda1)
return [optimizer1, optimizer2], [scheduler1, scheduler2]
def training_step(self, train_batch, batch_idx, optimizer_idx):
lossFunc = nn.BCELoss()
x0, label = train_batch
loss = lossFunc(self.forward(x0), label)
return loss
def test_step(self, batch, batch_idx):
data, label = batch
# below calculate the successful prediction rate on the training data
success_rate = (torch.ge(self.forward(data), 0.5).int()
== label).sum()/len(label)
self.log("success_rate", success_rate)
        # plot points around the training points and their propagation
number_of_samples = 50
radius = 0.3
angle = 2*3.14*torch.linspace(0, 1, number_of_samples)
for i in range(0, len(data), 1):
if label[i] == 0:
plt.plot(data[i, 0], data[i, 1], 'bx',markersize = 5)
if label[i] == 1:
plt.plot(data[i, 0], data[i, 1], 'ro',markersize = 5)
# plt.plot(data[i, 0], data[i, 1], '+')
sample = data[i]+torch.stack([radius*torch.cos(angle),
radius*torch.sin(angle)]).T
plt.plot(sample[:, 0], sample[:, 1], '--',color = 'purple')
traj = self.node_propagation(sample)
plt.plot(traj[-1, :, 0], traj[-1, :, 1], '--',color = 'orange')
# plot the classification boundary
delta = 0.025
xrange = torch.arange(-5.0, 5.0, delta)
yrange = torch.arange(-5.0, 5.0, delta)
x, y = torch.meshgrid(xrange, yrange)
equation = self.classification_layer(
torch.stack([x.flatten(), y.flatten()]).T)-0.5
plt.contour(x, y, equation.reshape(x.size()),
[0], linestyles={'dashed'})
# plot the trajectory
data_propagation = self.node_propagation(data)
for i in range(len(data)):
ith_traj = data_propagation[:, i, :]
if label[i] == 0:
plt.plot(ith_traj[:, 0].cpu(),
ith_traj[:, 1].cpu(), '--', color='black')
if label[i] == 1:
plt.plot(ith_traj[:, 0].cpu(),
ith_traj[:, 1].cpu(), '-.', color='black')
xv, yv = torch.meshgrid(torch.linspace(-20, 20, 30),
torch.linspace(-20, 20, 30))
y1 = torch.stack([xv.flatten(), yv.flatten()])
vector_field = self.func.net(y1.T)
u = vector_field[:, 0].reshape(xv.size())
v = vector_field[:, 1].reshape(xv.size())
print('shape of u',u.shape)
plt.quiver(xv, yv, u, v, color='grey')
plt.xticks([])
plt.yticks([])
plt.xlim([-12.5, 13])
plt.ylim([-5, 7])
plt.savefig('node_propatation_neural_ODE.pdf')
return success_rate
# data
if __name__ == '__main__':
    plt.figure()
training_data = TrainDataset(dataSize=global_data_size)
train_dataloader = DataLoader(
training_data, batch_size=global_data_size)
model = swiss_roll_node()
trainer = pl.Trainer(gpus=None, num_nodes=1,
max_epochs=global_max_epoches)
if global_train_flag:
trainer.fit(model, train_dataloader)
trainer.save_checkpoint("example_pendulum.ckpt")
time.sleep(5)
new_model = swiss_roll_node.load_from_checkpoint(
checkpoint_path="example_pendulum.ckpt")
domain = [-10, 10, -10, 10]
N = 200
xa = np.linspace(domain[0], domain[1], N)
ya = np.linspace(domain[2], domain[3], N)
xv, yv = np.meshgrid(xa, ya)
y = np.stack([xv.flatten(), yv.flatten()])
y = np.expand_dims(y.T, axis=1)
data2d = torch.from_numpy(y).float()
with torch.no_grad():
labels = torch.ge(new_model(data2d), 0.5).int()
plt.contourf(xa, ya, labels.view(
[N, N]), levels=[-0.5, 0.5, 1.5], colors=['#99C4E2', '#EAB5A0'])
plt.xticks([])
plt.yticks([])
plt.xlim([-3, 4.8])
plt.ylim([-2, 4.8])
plt.savefig('swiss_roll_neural_ode.pdf')
    print("classification boundary plotted")
plt.figure(2)
trainer = pl.Trainer(gpus=None, num_nodes=1,
max_epochs=global_max_epoches)
success_rate = trainer.test(new_model, train_dataloader)
    print("after training, the successful prediction rate on the train set is", success_rate)
plt.show()
|
StarcoderdataPython
|
6619543
|
import os.path
activate_this = os.path.join(os.path.dirname(__file__), '../env/bin/activate_this.py')
with open(activate_this) as f:
exec(f.read(), {'__file__': activate_this})
import examples.voice.assistant_library_with_local_commands_demo as assistant
assistant.main()
|
StarcoderdataPython
|
1984964
|
from django.urls import path
from . import api_views, views
urlpatterns = [
path('', views.pm_inbox, name='personal_messages-inbox'),
path('sentbox/', views.pm_sentbox, name='personal_messages-sentbox'),
path('compose/', views.pm_compose, name='personal_messages-compose'),
path('<int:pk>/', views.pm_detail, name='personal_messages-detail'),
]
|
StarcoderdataPython
|
5123384
|
<filename>testplan/cli/utils/command_list.py
"""
Implements command list type.
"""
from typing import List, Callable
import click
class CommandList:
"""
Utility class, used for creating, storing, and registering Click commands.
"""
def __init__(self, commands: List[click.Command] = None) -> None:
"""
:param commands: list of Click commands
"""
self.commands = commands or []
def command(self, *args, **kwargs) -> Callable:
"""
Decorator that creates new Click command and adds it to command list.
"""
def inner(func):
command = click.command(*args, **kwargs)(func)
self.commands.append(command)
return command
return inner
def register_to(self, group: click.Group) -> None:
"""
Registers all commands to the given group.
:param group: Click group to register the commands to
"""
for command in self.commands:
group.add_command(command)
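# --- Hedged usage sketch (not part of Testplan itself) ---
# Collects one command into a CommandList and registers it on a Click group;
# the command and group names below are illustrative.
if __name__ == "__main__":
    commands = CommandList()

    @commands.command(name="hello")
    @click.argument("name")
    def hello(name: str) -> None:
        """Greet NAME."""
        click.echo(f"Hello, {name}!")

    @click.group()
    def cli() -> None:
        """Example CLI grouping the collected commands."""

    commands.register_to(cli)
    cli()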
|
StarcoderdataPython
|
9695267
|
<filename>tests/integration/routes/test_status.py
from tests.integration.integration_test_case import IntegrationTestCase
class TestStatus(IntegrationTestCase):
def test_status_page(self):
self.get("/status")
self.assertStatusOK()
self.assertTrue("version" in self.getResponseData())
|
StarcoderdataPython
|
3497970
|
<filename>doc/example/convert_config.py
import os
import tempfile
# set up the config to convert the notebooks to html
output_dir = os.path.join(tempfile.gettempdir(), 'notebooks')
notebooks = sorted([os.path.join(output_dir, f) for f in os.listdir(output_dir) if f.endswith('.ipynb')])
c.NbConvertApp.notebooks = notebooks
|
StarcoderdataPython
|
8110303
|
<gh_stars>0
#!/usr/bin/env python3
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from Translatron.DocumentDB import YakDBDocumentDatabase, documentSerializer
from nltk.tokenize.regexp import RegexpTokenizer
from nltk.tokenize import word_tokenize
try:
import simplejson as json
except ImportError:
import json
import threading
import time
from ansicolor import blue, yellow, red
from YakDB.InvertedIndex import InvertedIndex
from Translatron.Misc.UniprotMetadatabase import initializeMetaDatabase
def has_alpha_chars(string):
return any((ch.isalnum() for ch in string))
# Initialize objects that will be passed onto the client upon request
metaDB = initializeMetaDatabase()
class TranslatronProtocol(WebSocketServerProtocol):
def __init__(self):
"""Setup a new connection"""
print(yellow("Initializing new YakDB connection"))
self.db = YakDBDocumentDatabase()
# Initialize NLTK objects
self.nerTokenizer = RegexpTokenizer(r'\s+', gaps=True)
def onConnect(self, request):
pass
def onOpen(self):
pass
def performDocumentSearch(self, query):
"""
Perform a token search on the document database.
Search is performed in multi-token prefix (all must hit) mode.
Tokens with no hits at all are ignored entirely
"""
startTime = time.time()
queryTokens = map(str.lower, word_tokenize(query))
levels = [b"title", b"content", b"metadata"]
#Remove 1-token parts from the query -- they are way too general!
#Also remove exclusively-non-alnum tokens
queryTokens = [tk for tk in queryTokens if (len(tk) > 1 and has_alpha_chars(tk))]
results = self.db.searchDocumentsMultiTokenPrefix(queryTokens, levels=levels)
        #Return only those paragraphs around the hit paragraph (or the first 3 paragraphs)
for hitLocation, doc in results.items():
(docId, docLoc) = InvertedIndex.splitEntityIdPart(hitLocation)
#Compute which paragraphs to display
minShowPar = 0
maxShowPar = 2
if docLoc.startswith(b"paragraph"):
paragraphNo = int(docLoc[9:])
minShowPar = max(0, paragraphNo - 1)
maxShowPar = min(len(doc[b"paragraphs"]), paragraphNo + 1)
#Modify documents
results[hitLocation][b"hitLocation"] = docLoc
results[hitLocation][b"paragraphs"] = doc[b"paragraphs"][minShowPar:maxShowPar]
# Measure timing
timeDiff = (time.time() - startTime) * 1000.0
print("Document search for %d tokens took %.1f milliseconds" % (len(queryTokens), timeDiff))
return results
def uniquifyEntities(self, entities):
"""Remove duplicates from a list of entities (key: ["id"])"""
seen = set()
result = []
for entity in entities:
itemId = entity[b"id"]
if itemId in seen: continue
seen.add(itemId)
result.append(entity)
return result
def performEntitySearch(self, query):
"""
Search entities. Tokens are not splitted in order to allow simple search
for multi-token entities like "Biological process"
"""
results = self.db.searchEntitiesSingleTokenMultiExact([query], level=b"aliases")
#Return only result array. TODO can't we just use results[query]
if query not in results:
return []
return results[query]
def filterNERTokens(self, token):
"""
Filter function to remove stuff that just clutters the display.
"""
#Short numbers are NOT considered database IDs.
#NOTE: In reality, pretty much all numbers are Allergome database IDs, e.g. see
# http://www.allergome.org/script/dettaglio.php?id_molecule=14
if len(token) <= 5 and token.isdigit():
return False
return True
def performEntityNER(self, query):
"Search a query text for entity/entity alias hits"
startTime = time.time()
tokens = self.nerTokenizer.tokenize(query)
queryTokens = [s.encode("utf-8") for s in tokens]
# Search for case-sensitive hits
searchFN = InvertedIndex.searchSingleTokenMultiExact
results = searchFN(self.db.entityIdx.index, frozenset(filter(self.filterNERTokens, queryTokens)), level=b"aliases")
# Results contains a list of tuples (dbid, db) for each hit. dbid is db + b":" + actual ID
        # We only need the actual ID, so remove the DBID prefix (which is required to avoid inadvertently merging entries).
# This implies that the DBID MUST contain a colon!
results = {k: [(a.partition(b":")[2], b) for (a, b) in v] for k, v in results.items() if v}
#
# Multi-token NER
# Based on case-insensitive entries where only the first token is indexed.
#
# TESTING: Multi token NER
lowercaseQueryTokens = [t.lower() for t in queryTokens]
t1 = time.time()
ciResults = searchFN(self.db.entityIdx.index, frozenset(lowercaseQueryTokens), level=b"cialiases")
t2 = time.time()
print("TX " + str(t2 - t1))
for (firstTokenHit, hits) in ciResults.items():
#Find all possible locations where the full hit could start, i.e. where the first token produced a hit
possibleHitStartIndices = [i for i, x in enumerate(lowercaseQueryTokens) if x == firstTokenHit]
            #Iterate over all possible full hits that start with this token
for hit in hits:
hitLoc, _, hitStr = hit[1].rpartition(b"\x1D") # Full (whitespace separated) entity name
if not hitStr: continue #Ignore malformed entries. Should usually not happen
hitTokens = [t.lower() for t in hitStr.split()]
numTokens = len(hitTokens)
#Check if at any possible hit start index the same tokens occur (in the same order )
for startIdx in possibleHitStartIndices:
actualTokens = lowercaseQueryTokens[startIdx : startIdx+numTokens]
#Check if the lists are equal. Shortcut for single-token hits
if numTokens == 1 or all((a == b for a, b in zip(actualTokens, hitTokens))):
#Reconstruct original (case-sensitive) version of the hit
csTokens = queryTokens[startIdx : startIdx+numTokens]
#NOTE: This MIGHT cause nothing to be highlighted, if the reconstruction
# of the original text is not equal to the actual text. This is true exactly
# if the tokenizer removes or changes characters besides whitespace in the text.
csHit = b" ".join(csTokens)
# Emulate defaultdict behaviour
if not csHit in results: results[csHit] = []
results[csHit].append((hitStr, hitLoc))
t3 = time.time()
print("TY " + str(t3 - t2))
# TODO: Remove results which are subsets of other hits. This occurs only if we have multi-token results
removeKeys = set() # Can't modify dict while iterating it, so aggregate keys to delete
for key in results.keys():
# Ignore single part results
if any((chr(c).isspace() for c in key)):
tokens = key.split()
for token in tokens:
# Remove sub-hit in results.
# This avoids the possibility of highlighting the smaller hit
if token in results:
removeKeys.add(token)
# Remove aggregated keys
for key in removeKeys:
del results[key]
# Result: For each token with hits --> (DBID, Database name)
        # Just takes the first DBID. It is unlikely that different DBIDs are found, but we
# can only link to one using the highlighted label
ret = {k: (v[0][0], v[0][1]) for k, v in results.items() if v}
# Measure timing
timeDiff = (time.time() - startTime) * 1000.0
print("NER for %d tokens took %.1f milliseconds" % (len(queryTokens), timeDiff))
return ret
def onMessage(self, payload, isBinary):
request = json.loads(payload.decode('utf8'))
# Perform action depending on query type
qtype = request["qtype"]
if qtype == "docsearch":
results = self.performDocumentSearch(request["term"])
del request["term"]
request["results"] = list(results.values())
elif qtype == "ner":
results = self.performEntityNER(request["query"])
del request["query"]
request["results"] = results
elif qtype == "metadb":
# Send meta-database to generate
request["results"] = metaDB
elif qtype == "entitysearch":
request["entities"] = self.performEntitySearch(request["term"])
del request["term"]
elif qtype == "getdocuments":
# Serve one or multiple documents by IDs
docIds = [s.encode() for s in request["query"]]
request["results"] = self.db.docIdx.findEntities(docIds)
del request["query"]
else:
print(red("Unknown websocket request type: %s" % request["qtype"], bold=True))
return # Do not send reply
#Return modified request object: Keeps custom K/V pairs but do not re-send query
self.sendMessage(json.dumps(request, default=documentSerializer).encode("utf-8"), False)
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
def startWebsocketServer():
print(blue("Websocket server starting up..."))
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
    #Asyncio only sets up an event loop in the main thread; otherwise we need to create one ourselves
if threading.current_thread().name != 'MainThread':
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
factory = WebSocketServerFactory("ws://0.0.0.0:9000", debug = False)
factory.protocol = TranslatronProtocol
loop = asyncio.get_event_loop()
server = loop.create_server(factory, '0.0.0.0', 9000)
server = loop.run_until_complete(server)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
|
StarcoderdataPython
|
1945936
|
from mantid.simpleapi import *
from matplotlib import pyplot as plt
import os
import sys
thisdir = os.path.abspath(os.path.dirname(__file__))
if thisdir not in sys.path:
sys.path.insert(0, thisdir)
def detector_position_for_reduction(path, conf, SNAP_definition_file, saved_file_path):
sim=Load(path)
AddSampleLog(sim, LogName='det_arc1', LogText= '{}'.format(conf.mon1), LogType='Number Series', NumberType='Double')
AddSampleLog(sim, LogName='det_arc2', LogText= '{}'.format(conf.mon2), LogType='Number Series', NumberType='Double')
LoadInstrument(sim, Filename=SNAP_definition_file, RewriteSpectraMap='True')
SaveNexus(sim, saved_file_path)
|
StarcoderdataPython
|
1848298
|
import torch
import functools
import numpy as np
import librosa
import online_scd.data as data
class InputFrameGenerator(object):
def __init__(self, blocksize, stepsize):
self.blocksize = blocksize
self.stepsize = stepsize
self.buffer = None
def frames(self, frames):
if self.buffer is not None:
stack = np.concatenate([self.buffer, frames])
else:
stack = frames.copy()
stack_length = len(stack)
nb_frames = (
stack_length - self.blocksize + self.stepsize) // self.stepsize
nb_frames = max(nb_frames, 0)
frames_length = nb_frames * self.stepsize + \
self.blocksize - self.stepsize
last_block_size = stack_length - frames_length
self.buffer = stack[int(nb_frames * self.stepsize):]
for index in range(0, int(nb_frames * self.stepsize), int(self.stepsize)):
yield stack[index:index + self.blocksize]
class StreamingSlidingWindowCmn:
def __init__(self, num_feats, cmn_window=600):
self.cmn_window = cmn_window
self.rolling_position = 0
self.rolling_buffer = np.zeros((num_feats, cmn_window))
self.buffer_length = 0
def process(self, frame):
self.rolling_buffer[:, self.rolling_position] = frame
self.rolling_position = (self.rolling_position + 1) % self.cmn_window
self.buffer_length = min(self.buffer_length + 1, self.cmn_window)
return frame - self.rolling_buffer[:, 0:self.buffer_length].mean(1)
class AudioStream2MelSpectrogram:
def __init__(self, sample_rate=16000, num_fbanks=40, cmn_window=600):
self.sample_rate = sample_rate
self.num_fbanks = num_fbanks
self.input_frame_generator = InputFrameGenerator(400, 160)
self.cmn = StreamingSlidingWindowCmn(num_fbanks, cmn_window)
def process_audio(self, audio):
for frames in self.input_frame_generator.frames(audio):
single_feat = librosa.feature.melspectrogram(frames, sr=self.sample_rate,
center=False,
n_fft=int(2.5*self.sample_rate/100.0), hop_length=self.sample_rate//100,
fmin=40, fmax=self.sample_rate//2-400, n_mels=self.num_fbanks)
single_feat = single_feat[:, 0]
single_feat = np.log(np.clip(single_feat, data.EPSILON.numpy(), None))
single_feat = self.cmn.process(single_feat)
yield single_feat
class StreamingDecoder:
def __init__(self, model):
self.model = model
self.model.eval()
self.audio2mel = AudioStream2MelSpectrogram(16000, model.hparams.num_fbanks)
self.mels_to_conv_input = InputFrameGenerator(model.encoder_fov, model.hparams.detection_period)
self.hidden_state = None
self.frame_counter = 0
self.discard_counter = 0
def process_audio(self, audio):
for feature in self.audio2mel.process_audio(audio):
for x in self.mels_to_conv_input.frames(feature.reshape(1, self.model.hparams.num_fbanks)):
x = torch.from_numpy(x).permute(1, 0).unsqueeze(0).float()
x = self.model.encode_windowed_features(x)
y, self.hidden_state = self.model.decode_single_timestep(x, self.hidden_state)
probs = y.log_softmax(dim=-1).exp()
if self.discard_counter < (self.model.hparams.label_delay - self.model.encoder_fov//2) // self.model.hparams.detection_period:
# we discard output for the 1st second (or whatever the label delay is)
self.discard_counter += 1
else:
yield probs.squeeze()
def find_speaker_change_times(self, audio, threshold=0.5):
for y in self.process_audio(audio):
if y[1] > threshold:
change_time = self.frame_counter / 100
if change_time > 0:
yield change_time
self.frame_counter += 10
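# --- Hedged usage sketch (not part of the original module) ---
# Shows InputFrameGenerator's overlapping framing on dummy audio chunks; the
# chunk sizes are illustrative. blocksize=400 / stepsize=160 corresponds to the
# 25 ms / 10 ms framing AudioStream2MelSpectrogram uses at 16 kHz.
if __name__ == "__main__":
    gen = InputFrameGenerator(blocksize=400, stepsize=160)
    # 1000 samples yield 4 overlapping 400-sample frames; the leftover samples
    # stay buffered and are consumed together with the next chunk.
    print(len(list(gen.frames(np.zeros(1000)))))  # -> 4
    print(len(list(gen.frames(np.zeros(1000)))))  # -> 7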
|
StarcoderdataPython
|
3237787
|
import argparse
import sqlite3
from urllib.parse import urlparse, parse_qsl, unquote, quote
import lxml.html
import requests
def create_connection(db_file):
try:
connection = sqlite3.connect(db_file)
return connection
except Exception as e:
print(e)
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--year', help='Year to be processed', default='2008') # Defaults to the oldest year
args = parser.parse_args()
if args.year:
year = args.year
exceptions = {'3B46301A6C8B850D87A730DA365B0960', 'E5FEFE44A3070BC9FC176503EC1A603F', '0C1AFF9AEAA0953F1B1F9B818C2771C9', '7C912BA5616D2E24E9F700D90E4BA2B6', '905BB7DE4BC4E29D7FD2D1969667B568', '773B14EEB416FA762C443D909FFED344', '1C0DB4785FC78DF4395263D40261C614', '5066110701B0AE95948A158F0B262EBB', '5651A6C10C4D375A1901142C49C5C70C', '8BED72498D055E55ABCA7AD29B180BF4'}
parse_link = f'https://www.chickensmoothie.com/archive/{year}/'
print(f'Parsing: "{parse_link}"')
response = requests.get(parse_link)
dom = lxml.html.fromstring(response.text)
event_links = dom.xpath('//li[@class="event active"]/a/@href') + dom.xpath('//li[@class="event "]/a/@href') # Get the links to all the monthlies and special pets
for event in event_links: # For each event
event = quote(event)
base_link = f'https://www.chickensmoothie.com{event}' # The link to the event archive
response = requests.get(base_link) # Get the HTML
dom = lxml.html.fromstring(response.text)
event_title = unquote(event[14:-1])
print(f'Finding pets from {event_title} Event')
if len(dom.xpath('//div[@class="pages"]')) == 0: # If there is only the current page
pages = 1
else: # If there are other pages of pets
pages = len(dom.xpath('//div[@class="pages"][1]/a')) # Get the number of pages
print(f'{pages} page(s) found')
for page in range(pages): # For each page
if page == 0:
link = base_link
else:
current_page = page * 7
link = f'{base_link}?pageStart={current_page}'
response = requests.get(link)
dom = lxml.html.fromstring(response.text)
print(f'Finding pets from "{link}" page')
image_links = dom.xpath('//img[@alt="Pet"]/@src')
pet_ids = set()
for image_link in image_links:
components = urlparse(image_link)
parameters = dict(parse_qsl(components.query))
if parameters['k'] not in exceptions:
pet_ids.add(parameters['k'])
conn = create_connection('cs_archive.sqlite3')
c = conn.cursor()
counter = 0
for pet_id in pet_ids:
try:
c.execute('INSERT INTO ChickenSmoothie_Archive (Pet_ID, Year, Event, Archive_Link) VALUES (?, ?, ?, ?)', (pet_id, year, event_title, link))
counter += 1
except sqlite3.IntegrityError:
print(f'WARNING: Pet ID {pet_id} already exists in database.')
print(f'Inserted {counter} row(s)')
conn.commit()
conn.close()
print('\n')
conn = create_connection('cs_archive.sqlite3')
c = conn.cursor()
c.execute('UPDATE ChickenSmoothie_Archive SET Archive_Link=? WHERE Archive_Link=?', (f'https://www.chickensmoothie.com/archive/{year}/Valentine%27s%20Day/', f'https://www.chickensmoothie.com/archive/{year}/Valentine%2527s%20Day/'))
c.execute('UPDATE ChickenSmoothie_Archive SET Archive_Link=? WHERE Archive_Link=?', (f'https://www.chickensmoothie.com/archive/{year}/Valentine%27s/', f'https://www.chickensmoothie.com/archive/{year}/Valentine%2527s/'))
c.execute('UPDATE ChickenSmoothie_Archive SET Archive_Link=? WHERE Archive_Link=?', (f'https://www.chickensmoothie.com/archive/{year}/St.%20Patrick%27s%20Day/', f'https://www.chickensmoothie.com/archive/{year}/St.%20Patrick%2527s%20Day/'))
c.execute('UPDATE ChickenSmoothie_Archive SET Archive_Link=? WHERE Archive_Link=?', (f'https://www.chickensmoothie.com/archive/{year}/April%20Fool%27s/', f'https://www.chickensmoothie.com/archive/{year}/April%20Fool%2527s/'))
c.execute('UPDATE ChickenSmoothie_Archive SET Event=? WHERE Event=?', ("Valentine's Day", 'Valentine%27s Day'))
c.execute('UPDATE ChickenSmoothie_Archive SET Event=? WHERE Event=?', ("Valentine's", 'Valentine%27s'))
c.execute('UPDATE ChickenSmoothie_Archive SET Event=? WHERE Event=?', ("St. Patrick's Day", 'St. Patrick%27s Day'))
c.execute('UPDATE ChickenSmoothie_Archive SET Event=? WHERE Event=?', ("April Fool's", 'April Fool%27s'))
conn.commit()
conn.close()
|
StarcoderdataPython
|
3303987
|
from __future__ import print_function
import time
import redis
import logging
import traceback
from django.conf import settings
from .models import CountBeansTask
from django_task.job import Job
from rq import get_current_job
class CountBeansJob(Job):
@staticmethod
def execute(job, task):
params = task.retrieve_params_as_dict()
num_beans = params['num_beans']
for i in range(0, num_beans):
time.sleep(0.01)
task.set_progress((i + 1) * 100 / num_beans, step=10)
|
StarcoderdataPython
|
58717
|
class _EventTarget:
'''https://developer.mozilla.org/en-US/docs/Web/API/EventTarget'''
NotImplemented
class _Node(_EventTarget):
'''https://developer.mozilla.org/en-US/docs/Web/API/Node'''
NotImplemented
class _Element(_Node):
'''ref of https://developer.mozilla.org/en-US/docs/Web/API/Element'''
NotImplemented
|
StarcoderdataPython
|
67052
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import inspect
import os
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import service
from oslo_utils import importutils
from osprofiler import profiler
from trove.common import cfg
from trove.common.i18n import _
from trove.common import profile
from trove.common.rpc import secure_serializer as ssz
from trove import rpc
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class RpcService(service.Service):
def __init__(self, key, host=None, binary=None, topic=None, manager=None,
rpc_api_version=None, secure_serializer=ssz.SecureSerializer):
super(RpcService, self).__init__()
self.key = key
self.host = host or CONF.host
self.binary = binary or os.path.basename(inspect.stack()[-1][1])
self.topic = topic or self.binary.rpartition('trove-')[2]
_manager = importutils.import_object(manager)
self.manager_impl = profiler.trace_cls("rpc")(_manager)
self.rpc_api_version = rpc_api_version or \
self.manager_impl.RPC_API_VERSION
self.secure_serializer = secure_serializer
profile.setup_profiler(self.binary, self.host)
def start(self):
LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host,
version=self.rpc_api_version)
if not hasattr(self.manager_impl, 'target'):
self.manager_impl.target = target
endpoints = [self.manager_impl]
self.rpcserver = rpc.get_server(
target, endpoints, key=self.key,
secure_serializer=self.secure_serializer)
self.rpcserver.start()
# TODO(hub-cap): Currently the context is none... do we _need_ it here?
report_interval = CONF.report_interval
if report_interval > 0:
pulse = loopingcall.FixedIntervalLoopingCall(
self.manager_impl.run_periodic_tasks, context=None)
pulse.start(interval=report_interval,
initial_delay=report_interval)
pulse.wait()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.rpcserver.stop()
except Exception:
LOG.info(_("Failed to stop RPC server before shutdown. "))
pass
super(RpcService, self).stop()
|
StarcoderdataPython
|
3461813
|
from datetime import date, datetime
from decimal import Decimal
from base.models import BaseModel
from carteiras.models import CentroCusto
from dateutil.relativedelta import relativedelta
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
class Cartao(BaseModel):
PERMISSAO_PARCELAMENTO_POSITIVO = True
PERMISSAO_PARCELAMENTO_NEGATIVO = False
ESCOLHAS_PERMISSAO_PARCELAMENTO = [
(PERMISSAO_PARCELAMENTO_POSITIVO, "Sim"),
(PERMISSAO_PARCELAMENTO_NEGATIVO, "Não"),
]
nome = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
valor_limite = models.DecimalField(max_digits=11, decimal_places=2)
valor_total = models.DecimalField(max_digits=11, decimal_places=2, default=Decimal("0.0"))
dia_fechamento = models.IntegerField(
default=1, validators=[MaxValueValidator(25), MinValueValidator(1)]
)
pode_parcelar = models.BooleanField(choices=ESCOLHAS_PERMISSAO_PARCELAMENTO)
centro_custo = models.OneToOneField(
CentroCusto, on_delete=models.CASCADE, related_name="cartao"
)
class Meta:
ordering = ["nome"]
unique_together = (("centro_custo_id", "slug"),)
indexes = [
models.Index(fields=["centro_custo_id", "slug"]),
]
def __str__(self):
return f"{self.nome}"
@property
def proximo_fechamento(self) -> date:
hoje = datetime.now().date()
try:
            data_fechamento = hoje.replace(day=self.dia_fechamento)
        except ValueError:
data_fechamento = (hoje + relativedelta(months=1)).replace(
day=1
) - relativedelta(days=1)
if data_fechamento < hoje:
data_fechamento = data_fechamento + relativedelta(months=1)
return data_fechamento
@property
def tem_lancamentos(self) -> bool:
return self.centro_custo.lancamentos.exists()
def tem_limite(self, valor: Decimal) -> bool:
return not (self.valor_total + valor) > self.valor_limite
def adicionar_despesa(self, valor: Decimal) -> "Cartao":
if self.tem_limite(valor):
self.valor_total = self.valor_total + valor
else:
raise Exception("Cartão não tem limite para a despesa!")
return self
def adicionar_receita(self, valor: Decimal) -> "Cartao":
self.valor_total = self.valor_total - valor
return self
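# --- Hedged usage sketch (illustrative; importing this module still requires
# the project's Django setup) ---
# Mirrors the next-closing-date rule used by Cartao.proximo_fechamento with
# plain dates; the sample dates and closing day below are made up.
if __name__ == "__main__":
    def _proximo_fechamento(hoje: date, dia_fechamento: int) -> date:
        try:
            data = hoje.replace(day=dia_fechamento)
        except ValueError:
            # Fall back to the last day of the current month.
            data = (hoje + relativedelta(months=1)).replace(day=1) - relativedelta(days=1)
        if data < hoje:
            data = data + relativedelta(months=1)
        return data

    print(_proximo_fechamento(date(2021, 3, 20), 5))  # -> 2021-04-05
    print(_proximo_fechamento(date(2021, 3, 2), 5))   # -> 2021-03-05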
|
StarcoderdataPython
|
11342070
|
from feedly.feeds.aggregated_feed.base import AggregatedFeed
from feedly.serializers.aggregated_activity_serializer import \
NotificationSerializer
from feedly.storage.redis.timeline_storage import RedisTimelineStorage
import copy
import datetime
import json
import logging
logger = logging.getLogger(__name__)
class NotificationFeed(AggregatedFeed):
'''
Similar to an aggregated feed, but:
    - doesn't use the activity storage (serializes everything into the timeline storage)
- features denormalized counts
- pubsub signals which you can subscribe to
For now this is entirely tied to Redis
'''
#: notification feeds only need a small max length
max_length = 99
key_format = 'notification_feed:1:user:%(user_id)s'
#: the format we use to denormalize the count
count_format = 'notification_feed:1:user:%(user_id)s:count'
#: the key used for locking
lock_format = 'notification_feed:1:user:%s:lock'
#: the main channel to publish
pubsub_main_channel = 'juggernaut'
timeline_serializer = NotificationSerializer
activity_storage_class = None
activity_serializer = None
def __init__(self, user_id, **kwargs):
'''
User id (the user for which we want to read/write notifications)
'''
AggregatedFeed.__init__(self, user_id, **kwargs)
# location to which we denormalize the count
self.format_dict = dict(user_id=user_id)
self.count_key = self.count_format % self.format_dict
# set the pubsub key if we're using it
self.pubsub_key = user_id
self.lock_key = self.lock_format % self.format_dict
from feedly.storage.redis.connection import get_redis_connection
self.redis = get_redis_connection()
def add_many(self, activities, **kwargs):
'''
Similar to the AggregatedActivity.add_many
The only difference is that it denormalizes a count of unseen activities
'''
with self.redis.lock(self.lock_key, timeout=2):
current_activities = AggregatedFeed.add_many(
self, activities, **kwargs)
# denormalize the count
self.denormalize_count()
# return the current state of the notification feed
return current_activities
def get_denormalized_count(self):
'''
Returns the denormalized count stored in self.count_key
'''
result = self.redis.get(self.count_key) or 0
result = int(result)
return result
def set_denormalized_count(self, count):
'''
Updates the denormalized count to count
:param count: the count to update to
'''
self.redis.set(self.count_key, count)
self.publish_count(count)
def publish_count(self, count):
'''
        Publishes the count via pubsub
:param count: the count to publish
'''
count_dict = dict(unread_count=count, unseen_count=count)
count_data = json.dumps(count_dict)
data = {'channel': self.pubsub_key, 'data': count_data}
encoded_data = json.dumps(data)
self.redis.publish(self.pubsub_main_channel, encoded_data)
def denormalize_count(self):
'''
Denormalize the number of unseen aggregated activities to the key
defined in self.count_key
'''
# now count the number of unseen
count = self.count_unseen()
# and update the count if it changed
stored_count = self.get_denormalized_count()
if stored_count != count:
self.set_denormalized_count(count)
return count
def count_unseen(self, aggregated_activities=None):
'''
Counts the number of aggregated activities which are unseen
:param aggregated_activities: allows you to specify the aggregated
activities for improved performance
'''
count = 0
if aggregated_activities is None:
aggregated_activities = self[:self.max_length]
for aggregated in aggregated_activities:
if not aggregated.is_seen():
count += 1
return count
def mark_all(self, seen=True, read=None):
'''
Mark all the entries as seen or read
:param seen: set seen_at
:param read: set read_at
'''
with self.redis.lock(self.lock_key, timeout=10):
# get the current aggregated activities
aggregated_activities = self[:self.max_length]
# create the update dict
update_dict = {}
for aggregated_activity in aggregated_activities:
changed = False
old_activity = copy.deepcopy(aggregated_activity)
if seen is True and not aggregated_activity.is_seen():
aggregated_activity.seen_at = datetime.datetime.today()
changed = True
if read is True and not aggregated_activity.is_read():
aggregated_activity.read_at = datetime.datetime.today()
changed = True
if changed:
update_dict[old_activity] = aggregated_activity
# send the diff to the storage layer
new, deleted = [], []
changed = update_dict.items()
self._update_from_diff(new, changed, deleted)
# denormalize the count
self.denormalize_count()
# return the new activities
return aggregated_activities
class RedisNotificationFeed(NotificationFeed):
timeline_storage_class = RedisTimelineStorage
|
StarcoderdataPython
|
6616118
|
<gh_stars>10-100
import tensorflow as tf
from tfsnippet.utils import assert_deps
from .base import Distribution
from .wrapper import as_distribution
__all__ = ['BatchToValueDistribution']
class BatchToValueDistribution(Distribution):
"""
Distribution that converts the last few `batch_ndims` into `values_ndims`.
See :meth:`Distribution.batch_ndims_to_value` for more details.
"""
def __init__(self, distribution, ndims):
"""
Construct a new :class:`BatchToValueDistribution`.
Args:
distribution (Distribution): The source distribution.
ndims (int): The last few `batch_ndims` to be converted
into `value_ndims`. Must be non-negative.
"""
distribution = as_distribution(distribution)
ndims = int(ndims)
if ndims < 0:
            raise ValueError('`ndims` must be a non-negative integer: '
'got {!r}'.format(ndims))
with tf.name_scope('BatchToValueDistribution.init'):
# get new batch shape
batch_shape = distribution.batch_shape
batch_static_shape = distribution.get_batch_shape()
if ndims > 0:
# static shape
if batch_static_shape.ndims < ndims:
raise ValueError(
                        '`distribution.batch_shape.ndims` is less than `ndims`'
': distribution {}, batch_shape.ndims {}, ndims {}'.
format(distribution, batch_static_shape.ndims, ndims)
)
batch_static_shape = batch_static_shape[: -ndims]
# dynamic shape
batch_shape = batch_shape[: -ndims]
with assert_deps([
tf.assert_greater_equal(
tf.size(distribution.batch_shape), ndims)
]) as asserted:
if asserted: # pragma: no cover
batch_shape = tf.identity(batch_shape)
# get new value ndims
value_ndims = ndims + distribution.value_ndims
self._distribution = distribution
self._ndims = ndims
super(BatchToValueDistribution, self).__init__(
dtype=distribution.dtype,
is_continuous=distribution.is_continuous,
is_reparameterized=distribution.is_reparameterized,
batch_shape=batch_shape,
batch_static_shape=batch_static_shape,
value_ndims=value_ndims,
)
@property
def base_distribution(self):
"""
Get the base distribution.
Returns:
Distribution: The base distribution.
"""
return self._distribution
def expand_value_ndims(self, ndims):
ndims = int(ndims)
if ndims == 0:
return self
return BatchToValueDistribution(
self._distribution, ndims + self._ndims)
batch_ndims_to_value = expand_value_ndims
def sample(self, n_samples=None, group_ndims=0, is_reparameterized=None,
compute_density=None, name=None):
from tfsnippet.bayes import StochasticTensor
group_ndims = int(group_ndims)
t = self._distribution.sample(
n_samples=n_samples,
group_ndims=group_ndims + self._ndims,
is_reparameterized=is_reparameterized,
compute_density=compute_density,
name=name
)
ret = StochasticTensor(
distribution=self,
tensor=t.tensor,
n_samples=n_samples,
group_ndims=group_ndims,
is_reparameterized=t.is_reparameterized,
log_prob=t._self_log_prob
)
ret._self_prob = t._self_prob
return ret
def log_prob(self, given, group_ndims=0, name=None):
group_ndims = int(group_ndims)
return self._distribution.log_prob(
given=given,
group_ndims=group_ndims + self._ndims,
name=name
)
|
StarcoderdataPython
|
1805509
|
<reponame>paulo-raca/python-progressbar<gh_stars>0
import time
import pytest
import progressbar
max_values = [None, 10, progressbar.UnknownLength]
def test_widgets_small_values():
widgets = [
'Test: ',
progressbar.Percentage(),
' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.ETA(),
' ',
progressbar.AbsoluteETA(),
' ',
progressbar.FileTransferSpeed(),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=10).start()
p.update(0)
for i in range(10):
time.sleep(1)
p.update(i + 1)
p.finish()
@pytest.mark.parametrize('max_value', [10 ** 6, 10 ** 8])
def test_widgets_large_values(max_value):
widgets = [
'Test: ',
progressbar.Percentage(),
' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.ETA(),
' ',
progressbar.AbsoluteETA(),
' ',
progressbar.FileTransferSpeed(),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=max_value).start()
for i in range(0, 10 ** 6, 10 ** 4):
time.sleep(1)
p.update(i + 1)
p.finish()
def test_format_widget():
widgets = []
for mapping in progressbar.FormatLabel.mapping:
widgets.append(progressbar.FormatLabel('%%(%s)r' % mapping))
p = progressbar.ProgressBar(widgets=widgets)
for i in p(range(10)):
time.sleep(1)
@pytest.mark.parametrize('max_value', [None, 10])
def test_all_widgets_small_values(max_value):
widgets = [
progressbar.Timer(),
progressbar.ETA(),
progressbar.AdaptiveETA(),
progressbar.AbsoluteETA(),
progressbar.DataSize(),
progressbar.FileTransferSpeed(),
progressbar.AdaptiveTransferSpeed(),
progressbar.AnimatedMarker(),
progressbar.Counter(),
progressbar.Percentage(),
progressbar.FormatLabel('%(value)d'),
progressbar.SimpleProgress(),
progressbar.Bar(),
progressbar.ReverseBar(),
progressbar.BouncingBar(),
progressbar.CurrentTime(),
progressbar.CurrentTime(microseconds=False),
progressbar.CurrentTime(microseconds=True),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=max_value)
for i in range(10):
time.sleep(1)
p.update(i + 1)
p.finish()
@pytest.mark.parametrize('max_value', [10 ** 6, 10 ** 7])
def test_all_widgets_large_values(max_value):
widgets = [
progressbar.Timer(),
progressbar.ETA(),
progressbar.AdaptiveETA(),
progressbar.AbsoluteETA(),
progressbar.DataSize(),
progressbar.FileTransferSpeed(),
progressbar.AdaptiveTransferSpeed(),
progressbar.AnimatedMarker(),
progressbar.Counter(),
progressbar.Percentage(),
progressbar.FormatLabel('%(value)d/%(max_value)d'),
progressbar.SimpleProgress(),
progressbar.Bar(fill=lambda progress, data, width: '#'),
progressbar.ReverseBar(),
progressbar.BouncingBar(),
progressbar.FormatCustomText('Custom %(text)s', dict(text='text')),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=max_value)
p.update()
time.sleep(1)
p.update()
for i in range(0, 10 ** 6, 10 ** 4):
time.sleep(1)
p.update(i)
@pytest.mark.parametrize('min_width', [None, 1, 2, 80, 120])
@pytest.mark.parametrize('term_width', [1, 2, 80, 120])
def test_all_widgets_min_width(min_width, term_width):
widgets = [
progressbar.Timer(min_width=min_width),
progressbar.ETA(min_width=min_width),
progressbar.AdaptiveETA(min_width=min_width),
progressbar.AbsoluteETA(min_width=min_width),
progressbar.DataSize(min_width=min_width),
progressbar.FileTransferSpeed(min_width=min_width),
progressbar.AdaptiveTransferSpeed(min_width=min_width),
progressbar.AnimatedMarker(min_width=min_width),
progressbar.Counter(min_width=min_width),
progressbar.Percentage(min_width=min_width),
progressbar.FormatLabel('%(value)d', min_width=min_width),
progressbar.SimpleProgress(min_width=min_width),
progressbar.Bar(min_width=min_width),
progressbar.ReverseBar(min_width=min_width),
progressbar.BouncingBar(min_width=min_width),
progressbar.FormatCustomText('Custom %(text)s', dict(text='text'),
min_width=min_width),
progressbar.DynamicMessage('custom', min_width=min_width),
progressbar.CurrentTime(min_width=min_width),
]
p = progressbar.ProgressBar(widgets=widgets, term_width=term_width)
p.update(0)
p.update()
for widget in p._format_widgets():
if min_width and min_width > term_width:
assert widget == ''
else:
assert widget != ''
@pytest.mark.parametrize('max_width', [None, 1, 2, 80, 120])
@pytest.mark.parametrize('term_width', [1, 2, 80, 120])
def test_all_widgets_max_width(max_width, term_width):
widgets = [
progressbar.Timer(max_width=max_width),
progressbar.ETA(max_width=max_width),
progressbar.AdaptiveETA(max_width=max_width),
progressbar.AbsoluteETA(max_width=max_width),
progressbar.DataSize(max_width=max_width),
progressbar.FileTransferSpeed(max_width=max_width),
progressbar.AdaptiveTransferSpeed(max_width=max_width),
progressbar.AnimatedMarker(max_width=max_width),
progressbar.Counter(max_width=max_width),
progressbar.Percentage(max_width=max_width),
progressbar.FormatLabel('%(value)d', max_width=max_width),
progressbar.SimpleProgress(max_width=max_width),
progressbar.Bar(max_width=max_width),
progressbar.ReverseBar(max_width=max_width),
progressbar.BouncingBar(max_width=max_width),
progressbar.FormatCustomText('Custom %(text)s', dict(text='text'),
max_width=max_width),
progressbar.DynamicMessage('custom', max_width=max_width),
progressbar.CurrentTime(max_width=max_width),
]
p = progressbar.ProgressBar(widgets=widgets, term_width=term_width)
p.update(0)
p.update()
for widget in p._format_widgets():
if max_width and max_width < term_width:
assert widget == ''
else:
assert widget != ''
|
StarcoderdataPython
|
4841888
|
# Simple policy network built with the TensorFlow layers API
import tensorflow as tf
import gym
import numpy as np
num_inputs = 4  # 4 observations: cart position, cart velocity, pole angle, pole angular velocity
num_hidden = 4
num_outputs = 1  # 1 output: the probability of going left (vs. right)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32,shape = [None,num_inputs])
hidden_layer_one = tf.layers.dense(X,num_hidden, activation = tf.nn.relu,kernel_initializer = initializer)
hidden_layer_two = tf.layers.dense(hidden_layer_one,num_hidden, activation = tf.nn.relu,kernel_initializer = initializer)
# The action is binary (0 or 1), so we use the sigmoid activation to get a probability
output_layer = tf.layers.dense(hidden_layer_two,num_outputs,activation= tf.nn.sigmoid, kernel_initializer = initializer)
probabilities = tf.concat(axis = 1 ,values = [output_layer,1-output_layer])
action = tf.multinomial(probabilities,num_samples = 1)
init = tf.global_variables_initializer()
epi = 50
step_limit = 500 #very unlikely we'll ever hit 500 steps
env = gym.make('CartPole-v0')
avg_steps = []  # steps survived in each episode
with tf.Session() as sess:
sess.run(init)
for i_episode in range(epi):
obs = env.reset()
for step in range (step_limit):
action_value = action.eval(feed_dict = {X:obs.reshape(1,num_inputs)})
            # Step the environment with the sampled action
obs,reward,done,info = env.step(action_value[0][0]) # 0 or 1
if done:
avg_steps.append(step)
print("Done after {} Steps".format(step))
break
print("After {} Episodes, average steps per game was {}".format(epi ,np.mean(avg_steps)))
env.close()
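# ---------------------------------------------------------------------------
# Side note (sketch, TF1-era API as used above): tf.multinomial interprets its
# first argument as unnormalized log-probabilities (logits).  The script feeds
# raw probabilities, which still gives a usable stochastic policy; sampling
# exactly according to the sigmoid output would look like:
#     probs = tf.constant([[0.7, 0.3]])
#     exact_sample = tf.multinomial(tf.log(probs), num_samples=1)
# ---------------------------------------------------------------------------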
|
StarcoderdataPython
|
3525902
|
<gh_stars>10-100
"""
这是一个仪表图像处理实例,基于k210芯片,歪朵拉开发板。
同级根目录下mnist.kmodel需放置于sd卡,请讲sd卡命名为sd。
TODO:
1、更好的针对印刷体数据集的数据增广;
2、指针自适应特征颜色提取范围;
"""
from fpioa_manager import fm, board_info
from machine import UART
import utime
fm.register(board_info.PIN9,fm.fpioa.UART2_TX)
fm.register(board_info.PIN10,fm.fpioa.UART2_RX)
uart_B = UART(UART.UART2, 115200, 8, None, 1, timeout=10)
import sensor, image, time, lcd, math
import KPU as kpu
#task = kpu.load("/sd/paste_mnist.kmodel")
task = kpu.load("/sd/mnist.kmodel")
info=kpu.netinfo(task)
lcd.init(freq=15000000)
sensor.reset() # Reset and initialize the sensor. It will
# run automatically, call sensor.run(0) to stop
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_vflip(True)
sensor.set_auto_gain(True)
sensor.set_auto_whitebal(True)
sensor.set_gainceiling(8)
sensor.skip_frames(time = 2000) # Wait for settings take effect.
clock = time.clock() # Create a clock object to track the FPS.
def mnist_run(img, dx, dy, dis, x00 =0, y00 = 80, nnn = 2):
if nnn == 4:
x00 = x00
dy = dy
img0 = img.copy((x00+dis*nnn,y00+nnn*0, dx, dy))
#img0.mean(2, threshold=True, offset=1, invert=True) #A
img0.median(2, percentile=0.3, threshold=True, offset=-3, invert=True)
#img0.midpoint(2, bias=0.3, threshold=True, offset=0, invert=True)
#img0.mode(2, threshold=True, offset=0, invert=True) #B
#img0.binary([(110,255)], invert = True)
for dx0 in range(dx):
for dy0 in range(dy):
a0 = img0.get_pixel(dx0,dy0)
img.set_pixel(x00+dis*nnn+dx0,y00+nnn*0+dy0,a0)
#img1 = img0.copy((1,1, dx-1, dy-1))
img1 = img0
img1 = img1.resize(28,28)
img1 = img1.to_grayscale(1)
img1.pix_to_ai()
fmap=kpu.forward(task,img1)
plist=fmap[:]
pmax=max(plist)
max_index=plist.index(pmax)
kpu.fmap_free(fmap)
return max_index, pmax
def search_col(x_input, y_input, img, width = 320, height = 240):
x_l = []
y_l = []
for x in range(x_input - 32,x_input + 32):
for y in range(y_input - 32,y_input + 32):
if math.sqrt((x-x_input)*(x-x_input) + (y-y_input)*(y-y_input))<32 and math.sqrt((x-x_input)*(x-x_input) + (y-y_input)*(y-y_input))>14:
col = img.get_pixel(x,y)
if col[0]>120 and col[1]<100 and col[2]<100:
x_l.append(x-x_input)
y_l.append(-y+y_input)
#img.set_pixel(x,y,(255,255,255))
#else:
#img.set_pixel(x,y,(0,0,0))
angle_count = 0
le = 0
x_c = 0
y_c = 0
for i in range(len(x_l)):
leng = math.sqrt(x_l[i]**2 + y_l[i]**2)
le = le + leng
angle_count = angle_count + math.acos(y_l[i]/leng)*leng
x_c = x_c + x_l[i]
y_c = y_c + y_l[i]
if le == 0:
angle = 0
else:
angle = angle_count/le
dx = 0
dy = 0
dx = int(30 * math.sin(angle))
dy = int(30 * math.cos(angle))
if x_c < 0:
angle = -angle + 2*math.pi
dx = -dx
img.draw_line((x_input, y_input,x_input+dx, y_input-dy), thickness = 2, color=(0,0,255))
return angle/math.pi*180
num_list = [0, 0, 0, 0, 5]
p_list = [0,0,0,0,0]
angle_list = [0,0,0,0]
while(True):
count_0 = 0
count_4 = 0
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
#img.mean(1, threshold=True, offset=5, invert=True)
#img.binary([(100,255)], invert = True)
#img.erode(1)
x00 = 91
y00 = 4
dx = 20
dy = 20
dis = 25
p_thre = 0.95
for i in range(0,5):
class_num, pmax = mnist_run(img, dx, dy, dis,\
x00 =x00, y00 = y00,\
nnn=i)
if pmax > p_thre:
num_list[i] = class_num
p_list[i] = pmax
for i in range(0,5):
if i == 4:
x00 = x00
dy = dy
img.draw_rectangle((x00+dis*i,y00+i*0, dx, dy), color=255)
R_list = []
c_color = []
x_list = [101+3, 175+2, 241, 263]
y_list = [176-6, 193-6, 156-6, 84-6]
angle_list[0] = search_col(x_list[0], y_list[0], img, width = 320, height = 240)
angle_list[1] = search_col(x_list[1], y_list[1], img, width = 320, height = 240)
angle_list[2] = search_col(x_list[2], y_list[2], img, width = 320, height = 240)
angle_list[3] = search_col(x_list[3], y_list[3], img, width = 320, height = 240)
print(num_list)
print(p_list)
#print(angle_list)
R = 32
img.draw_circle(x_list[0], y_list[0], R, color = (255, 0, 0), thickness = 2, fill = False)
img.draw_circle(x_list[1], y_list[1], R, color = (255, 0, 0), thickness = 2, fill = False)
img.draw_circle(x_list[2], y_list[2], R, color = (255, 0, 0), thickness = 2, fill = False)
img.draw_circle(x_list[3], y_list[3], R, color = (255, 0, 0), thickness = 2, fill = False)
# R-G-B 180-60-60
r = 3
img.draw_circle(x_list[0], y_list[0], r, color = (255, 255, 0), thickness = 1, fill = False)
img.draw_circle(x_list[1], y_list[1], r, color = (255, 255, 0), thickness = 1, fill = False)
img.draw_circle(x_list[2], y_list[2], r, color = (255, 255, 0), thickness = 1, fill = False)
img.draw_circle(x_list[3], y_list[3], r, color = (255, 255, 0), thickness = 1, fill = False)
utime.sleep_ms(250)
#str(num_list[0])
uart_B.write(str(0)+ str(0)+ str(0)+ str(0)+ str(6)+\
'.' + str(int(angle_list[3]/36)) + str(int(angle_list[2]/36)) + str(int(angle_list[1]/36)) + str(int(angle_list[0]/36)))
lcd.display(img) # Display on LCD
#lcd.draw_string(20,20,"%d: %.3f"%(max_index,pmax),lcd.WHITE,lcd.BLACK)
|
StarcoderdataPython
|
99126
|
import itertools
# combine iterators
it = itertools.chain([1, 2, 3], [4, 5, 6])
# repeat a value
it = itertools.repeat("hello", 3)
print(list(it))
# repeat an iterator's items
it = itertools.cycle([1, 2])
result = [next(it) for _ in range(10)]
print(result)
# split an iterator
it1, it2, it3 = itertools.tee(["first", "second"], 3)
print(list(it1))
print(list(it2))
print(list(it3))
# zip unequal length iterators with a default value
keys = ["one", "two", "three"]
values = [1, 2]
it = itertools.zip_longest(keys, values)
longest = list(it)
print(longest)
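# zip with an explicit default for the shorter iterator
# (added example; fillvalue is a standard zip_longest keyword argument)
it = itertools.zip_longest(keys, values, fillvalue=0)
print(list(it))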
|
StarcoderdataPython
|
4998246
|
import numpy as np
import pandas as pd
import scipy.optimize as opt
from family import *
class FormulaError(Exception):
def handle():
return "Something is wrong with your formula..."
class lm:
def __init__(self, formula, data=None):
assert isinstance(data, pd.DataFrame)
assert isinstance(formula, str)
'''
formula is something like 'Y ~ X+Age+BMI'
'''
self.formula = formula.replace(" ", "")
try:
_Y_var, _X_var = self.formula.split("~")
variables = _X_var.split("+")
except ValueError as e:
raise FormulaError(FormulaError.handle())
if data is None:
# look in global environment
pass
try:
            if variables[0] in ["0", "1"]:
                _intercept = bool(int(variables[0]))
                variables = variables[1:]
                assert all(not var.isdigit() for var in [_Y_var] + variables)
else:
_intercept = True
assert all(not var.isdigit() for var in [_Y_var] + variables)
self.y = data[_Y_var].values # pd Series (or column)
            if variables[0] == '.':
                variables = list(data.columns)
                variables.remove(_Y_var)
self.x = data[variables].values
except KeyError:
print("The names in the formula don't match the ones in the DataFrame.")
raise KeyError
except AssertionError:
raise FormulaError(FormulaError.handle())
n, p = self.x.shape
_x = np.hstack((np.ones(shape=[n, 1]), self.x))
self.coefficients = np.linalg.inv(_x.T@_x)@[email protected]
self.fitted_values = [email protected]
self.residuals = self.y - self.fitted_values
class glm(lm):
def __init__(self, formula, data=None, family=gaussian()):
'''
attributes inherited include:
formula, y, x, coefficients, residuals
only works for binomial with logit link, for now
'''
super().__init__(formula, data)
self.family = family
if isinstance(family, gaussian):
# do nothing: all inherited attributes are okay (linear model)
pass
        elif isinstance(family, binomial):
            # use Newton-Raphson (Newton-CG) to minimize the negative log-likelihood
            link = family.link
            if link == "logit":
                n, p = self.x.shape
                _x = np.hstack((np.ones(shape=[n, 1]), self.x))

                def neg_likelihood(beta):
                    eta = _x @ beta
                    return np.sum(np.log1p(np.exp(eta)) - self.y * eta)

                def neg_likelihood_grad(beta):
                    eta = _x @ beta
                    return _x.T @ (1.0 / (1.0 + np.exp(-eta)) - self.y)

                init_beta = np.zeros(len(self.coefficients))
                result = opt.minimize(fun=neg_likelihood,
                                      x0=init_beta,
                                      jac=neg_likelihood_grad,
                                      method="Newton-CG")
                self.coefficients = result.x
|
StarcoderdataPython
|
3399601
|
from karlovic.model_server import model_server
|
StarcoderdataPython
|
3488880
|
<filename>ml-agents/mlagents/trainers/benchmark.py
from mlagents.envs.brain import BrainInfo
import numpy as np
class BenchmarkManager(object):
agent_status = [[]]
agent_amount = None
agent_benchmark_result = [] #[episode_len, cumulative_reward, success_goal]
success_threshold = None
# agent_benchmark_result = [[5,100,True],[6,120,False], [10,255,True]]
benchmark_episode = None
verbose = False
@staticmethod
def is_complete():
return len(BenchmarkManager.agent_benchmark_result) >= BenchmarkManager.benchmark_episode
@staticmethod
def add_result(info: BrainInfo):
for agent_index in range(BenchmarkManager.agent_amount):
BenchmarkManager.agent_status[agent_index][0] += 1
BenchmarkManager.agent_status[agent_index][1] += info.rewards[agent_index]
if info.local_done[agent_index]:
if BenchmarkManager.verbose:
print('Episode Length', BenchmarkManager.agent_status[agent_index][0])
print('Cumulative Reward', BenchmarkManager.agent_status[agent_index][1])
u = BenchmarkManager.agent_status[agent_index][:]
if info.rewards[agent_index] >= BenchmarkManager.success_threshold:
u.append(True)
else:
u.append(False)
BenchmarkManager.agent_benchmark_result.append(u[:])
BenchmarkManager.agent_status[agent_index][0] = 0
BenchmarkManager.agent_status[agent_index][1] = 0
@staticmethod
def analyze():
result = np.array(BenchmarkManager.agent_benchmark_result)
print('Episode Length: Avg = %.2f, Std = %.2f' % (np.average(result[:,0]), np.std(result[:,0])))
print('Reward: Avg = %.2f, Std = %.2f' % (np.average(result[:,1]), np.std(result[:,1])))
print(
'Success Rate: ',
'{:.0%}'.format(np.sum(result[:,2]) / len(BenchmarkManager.agent_benchmark_result))
)
def __init__(self, agent_amount, benchmark_episode, success_threshold, verbose):
BenchmarkManager.agent_status = [[0 for x in range(2)] for y in range(agent_amount)]
BenchmarkManager.agent_amount = agent_amount
BenchmarkManager.benchmark_episode = benchmark_episode
BenchmarkManager.success_threshold = success_threshold
BenchmarkManager.verbose = verbose
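# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only: the episode records below are typed
# in by hand instead of being collected from a real BrainInfo stream):
if __name__ == "__main__":
    BenchmarkManager(agent_amount=2, benchmark_episode=3,
                     success_threshold=1.0, verbose=False)
    BenchmarkManager.agent_benchmark_result = [[5, 1.2, True],
                                               [7, 0.4, False],
                                               [6, 1.5, True]]
    print(BenchmarkManager.is_complete())  # True: three episodes recorded
    BenchmarkManager.analyze()
# ---------------------------------------------------------------------------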
|
StarcoderdataPython
|
4967602
|
<filename>vagrant/main.py
#!/usr/bin/env python3
import psycopg2
def get_query_result(query):
db = psycopg2.connect(database="news")
c = db.cursor()
c.execute(query)
result = c.fetchall()
db.close()
return result
def print_breaks():
print()
print("########==============================================########")
print()
query = """
SELECT title_path.t, count(log.path) AS c
FROM
(SELECT DISTINCT
articles.title AS t,
log.path AS p
FROM
articles
LEFT JOIN log
ON log.path ILIKE '%' || articles.slug || '%'
AND log.status = '200 OK') AS title_path
LEFT JOIN log on log.path = title_path.p
GROUP BY title_path.t
ORDER BY c DESC
LIMIT(3);
"""
print("Quais são os três artigos mais populares de todos os tempos?")
print()
print("{:>35s}{:>20s}".format("Título", "Nº de acessos"))
for row in get_query_result(query):
print("{:>35s}{:>20d}".format(row[0], row[1]))
print_breaks()
query = """
SELECT slug_name.aun, count(log.path) AS c
FROM
(SELECT
articles.slug AS ars,
authors.name AS aun
FROM
authors
LEFT JOIN articles
ON authors.id = articles.author) AS slug_name
LEFT JOIN log
ON log.path ILIKE '%' || slug_name.ars || '%'
AND log.status = '200 OK'
GROUP BY slug_name.aun
ORDER BY c DESC
LIMIT(3);
"""
print("Quem são os autores de artigos mais populares de todos os tempos?")
print()
print("{:>35s}{:>20s}".format("Author", "Nº de acessos"))
for row in get_query_result(query):
print("{:>35s}{:>20d}".format(row[0], row[1]))
print_breaks()
query = """
SELECT *
FROM
(SELECT
to_char(date_trunc('day', time), 'DD Mon YYYY'),
(SUM(
CASE WHEN status = '404 NOT FOUND' THEN 1 ELSE 0 END
)::DECIMAL / COUNT(status) ) * 100 AS percent_total
FROM log
GROUP BY 1
ORDER BY 1) AS temp_table
WHERE percent_total > 1;
"""
print("Em quais dias mais de 1% das requisições resultaram em erros?")
print()
print("{:>15s}{:>20s}".format("DATA", "PORCENTAGEM (%)"))
result = get_query_result(query)[0]
print("{:>15s}{:>19.4f}%".format(result[0], float(result[1])))
print_breaks()
|
StarcoderdataPython
|
5046138
|
<reponame>ceyeoh/fyp_doppler<filename>website/utils.py
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter
ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}
def allowed_file(filename):
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
def find_y(img, thres, tol=10, gthres=0.1):
y1, y2 = 0, 0
for row in range(img.shape[0]):
if img[row].max() > thres:
gtruth = np.array(np.unique((img[row] > thres), return_counts=True)).T
if gtruth[-1][-1] > gthres * img.shape[1]:
if row - tol < 0:
y1 = 0
else:
y1 = row - tol
break
for row in sorted(range(img.shape[0]), reverse=True):
if img[row].max() > thres:
gtruth = np.array(np.unique((img[row] > thres), return_counts=True)).T
if gtruth[-1][-1] > gthres * img.shape[1]:
if row + tol > img.shape[0]:
y2 = img.shape[0]
else:
y2 = row + tol
break
return y1, y2
def find_x(img, thres):
x1, x2 = 0, 0
for col in range(img.shape[1]):
if img[:, col].max() > thres:
x1 = col
break
for col in sorted(range(img.shape[1]), reverse=True):
if img[:, col].max() > thres:
x2 = col
break
return x1, x2
def loader(filename):
img = Image.open(filename)
img = img.convert("L")
w, h = img.size
x1 = 0.1 * w
x2 = 0.85 * w
y1 = 0.51 * h
y2 = 0.94 * h
img = img.crop((x1, y1, x2, y2))
img = img.filter(ImageFilter.GaussianBlur(radius=2))
img_mat = np.array(img)
x1, x2 = find_x(img_mat, thres=100)
y1, y2 = find_y(img_mat, thres=60, gthres=0.075)
img = img.crop((x1, y1, x2, y2))
c = 1
img = img.resize([i * c for i in [448, 112]])
return img.convert("RGB")
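# ---------------------------------------------------------------------------
# Usage sketch (the filename is hypothetical; any screenshot accepted by
# allowed_file() would do):
#     if allowed_file("example_doppler.png"):
#         cropped = loader("example_doppler.png")
#         cropped.save("example_doppler_448x112.png")
# ---------------------------------------------------------------------------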
|
StarcoderdataPython
|
3375480
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the AWSCollector."""
from __future__ import unicode_literals
import unittest
import mock
from libcloudforensics.providers.aws.internal import account as aws_account
from libcloudforensics.providers.aws.internal import ebs, ec2
from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib.collectors import aws
from dftimewolf.lib.containers import containers
with mock.patch('boto3.session.Session._setup_loader') as mock_session:
mock_session.return_value = None
FAKE_AWS_ACCOUNT = aws_account.AWSAccount(
default_availability_zone='fake-zone-2b')
FAKE_ANALYSIS_VM = ec2.AWSInstance(
FAKE_AWS_ACCOUNT,
'fake-analysis-vm-id',
'fake-zone-2',
'fake-zone-2b',
name='fake-analysis-vm')
FAKE_INSTANCE = ec2.AWSInstance(
FAKE_AWS_ACCOUNT,
'my-owned-instance-id',
'fake-zone-2',
'fake-zone-2b')
FAKE_VOLUME = ebs.AWSVolume(
'fake-volume-id',
FAKE_AWS_ACCOUNT,
'fake-zone-2',
'fake-zone-2b',
False)
FAKE_BOOT_VOLUME = ebs.AWSVolume(
'fake-boot-volume-id',
FAKE_AWS_ACCOUNT,
'fake-zone-2',
'fake-zone-2b',
False,
name='fake-boot-volume',
device_name='/dev/spf')
FAKE_VOLUME_COPY = ebs.AWSVolume(
'fake-volume-id-copy',
FAKE_AWS_ACCOUNT,
'fake-zone-2',
'fake-zone-2b',
False)
class AWSCollectorTest(unittest.TestCase):
"""Tests for the AWS collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
gcloud_collector = aws.AWSCollector(test_state)
self.assertIsNotNone(gcloud_collector)
# pylint: disable=invalid-name
@mock.patch('boto3.session.Session._setup_loader')
@mock.patch('libcloudforensics.providers.aws.internal.ec2.AWSInstance')
@mock.patch('libcloudforensics.providers.aws.forensics.StartAnalysisVm')
def testSetUp1(self, mock_StartAnalysisVm, mock_AWSInstance, mock_loader):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
mock_StartAnalysisVm.return_value = (mock_AWSInstance, None)
mock_loader.return_value = None
aws_collector = aws.AWSCollector(test_state)
# Setup the collector with minimum information
aws_collector.SetUp(
'test-remote-profile-name',
'test-remote-zone',
'fake_incident_id',
remote_instance_id='my-owned-instance-id'
)
self.assertEqual([], test_state.errors)
self.assertEqual(
'test-remote-profile-name', aws_collector.remote_profile_name)
self.assertEqual('test-remote-zone', aws_collector.remote_zone)
self.assertEqual('fake_incident_id', aws_collector.incident_id)
self.assertEqual([], aws_collector.volume_ids)
self.assertEqual(aws_collector.all_volumes, False)
self.assertEqual(
'test-remote-profile-name', aws_collector.analysis_profile_name)
self.assertEqual('test-remote-zone', aws_collector.analysis_zone)
mock_StartAnalysisVm.assert_called_with(
'aws-forensics-vm-fake_incident_id',
'test-remote-zone',
50,
ami=None,
cpu_cores=16,
dst_profile='test-remote-profile-name'
)
# pylint: disable=invalid-name
@mock.patch('boto3.session.Session._setup_loader')
@mock.patch('libcloudforensics.providers.aws.forensics.StartAnalysisVm')
def testSetUp2(self, mock_StartAnalysisVm, mock_loader):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
mock_StartAnalysisVm.return_value = (FAKE_INSTANCE, None)
mock_loader.return_value = None
aws_collector = aws.AWSCollector(test_state)
# Setup the collector with an instance ID, destination zone and profile.
aws_collector.SetUp(
'test-remote-profile-name',
'test-remote-zone',
'fake_incident_id',
remote_instance_id='my-owned-instance-id',
all_volumes=True,
analysis_profile_name='test-analysis-profile-name',
analysis_zone='test-analysis-zone'
)
self.assertEqual([], test_state.errors)
self.assertEqual(
'test-remote-profile-name', aws_collector.remote_profile_name)
self.assertEqual('test-remote-zone', aws_collector.remote_zone)
self.assertEqual('fake_incident_id', aws_collector.incident_id)
self.assertEqual([], aws_collector.volume_ids)
self.assertEqual(aws_collector.all_volumes, True)
self.assertEqual('my-owned-instance-id', aws_collector.remote_instance_id)
self.assertEqual(
'test-analysis-profile-name', aws_collector.analysis_profile_name)
self.assertEqual('test-analysis-zone', aws_collector.analysis_zone)
mock_StartAnalysisVm.assert_called_with(
'aws-forensics-vm-fake_incident_id',
'test-analysis-zone',
50,
ami=None,
cpu_cores=16,
dst_profile='test-analysis-profile-name'
)
# pylint: disable=line-too-long, invalid-name
@mock.patch('boto3.session.Session._setup_loader')
@mock.patch('libcloudforensics.providers.aws.forensics.StartAnalysisVm')
@mock.patch('libcloudforensics.providers.aws.forensics.CreateVolumeCopy')
@mock.patch('dftimewolf.lib.collectors.aws.AWSCollector._FindVolumesToCopy')
@mock.patch('libcloudforensics.providers.aws.internal.ec2.AWSInstance.AttachVolume')
def testProcess(self,
unused_mock_AttachVolume,
mock_FindVolumesToCopy,
mock_CreateVolumeCopy,
mock_StartAnalysisVm,
mock_loader):
"""Tests the collector's Process() function."""
mock_StartAnalysisVm.return_value = (FAKE_ANALYSIS_VM, None)
mock_FindVolumesToCopy.return_value = [FAKE_VOLUME]
mock_CreateVolumeCopy.return_value = FAKE_VOLUME_COPY
mock_loader.return_value = None
test_state = state.DFTimewolfState(config.Config)
aws_collector = aws.AWSCollector(test_state)
aws_collector.SetUp(
'test-remote-profile-name',
'test-remote-zone',
'fake_incident_id',
remote_instance_id='my-owned-instance-id',
all_volumes=True,
analysis_profile_name='test-analysis-profile-name',
analysis_zone='test-analysis-zone'
)
aws_collector.Process()
mock_CreateVolumeCopy.assert_called_with(
'test-remote-zone',
dst_zone='test-analysis-zone',
volume_id=FAKE_VOLUME.volume_id,
src_profile='test-remote-profile-name',
dst_profile='test-analysis-profile-name')
forensics_vms = test_state.GetContainers(containers.ForensicsVM)
forensics_vm = forensics_vms[0]
self.assertEqual('fake-analysis-vm', forensics_vm.name)
self.assertEqual(
'fake-volume-id-copy', forensics_vm.evidence_disk.volume_id)
# pylint: disable=line-too-long
@mock.patch('boto3.session.Session._setup_loader')
@mock.patch('libcloudforensics.providers.aws.internal.ec2.AWSInstance.GetBootVolume')
@mock.patch('libcloudforensics.providers.aws.internal.ebs.EBS.GetVolumeById')
@mock.patch('libcloudforensics.providers.aws.internal.ec2.AWSInstance.ListVolumes')
@mock.patch('libcloudforensics.providers.aws.internal.ec2.EC2.GetInstanceById')
@mock.patch('libcloudforensics.providers.aws.forensics.StartAnalysisVm')
# We're manually calling protected functions
# pylint: disable=protected-access, invalid-name
def testFindVolumesToCopy(self,
mock_StartAnalysisVm,
mock_GetInstanceById,
mock_ListVolumes,
mock_GetVolumeById,
mock_GetBootVolume,
mock_loader):
"""Tests the FindVolumesToCopy function with different SetUp() calls."""
test_state = state.DFTimewolfState(config.Config)
aws_collector = aws.AWSCollector(test_state)
mock_StartAnalysisVm.return_value = (FAKE_INSTANCE, None)
mock_loader.return_value = None
mock_ListVolumes.return_value = {
FAKE_BOOT_VOLUME.volume_id: FAKE_BOOT_VOLUME,
FAKE_VOLUME.volume_id: FAKE_VOLUME
}
mock_GetVolumeById.return_value = FAKE_VOLUME
mock_GetInstanceById.return_value = FAKE_INSTANCE
mock_GetBootVolume.return_value = FAKE_BOOT_VOLUME
# Nothing is specified, AWSCollector should collect the instance's
# boot volume
aws_collector.SetUp(
'test-remote-profile-name',
'test-remote-zone',
'fake_incident_id',
remote_instance_id='my-owned-instance-id'
)
volumes = aws_collector._FindVolumesToCopy()
self.assertEqual(1, len(volumes))
self.assertEqual('fake-boot-volume-id', volumes[0].volume_id)
mock_GetInstanceById.assert_called_once()
mock_GetBootVolume.assert_called_once()
mock_ListVolumes.assert_not_called()
# Specifying all_volumes should return all volumes for the instance
# (see mock_ListVolumes return value)
aws_collector.SetUp(
'test-remote-profile-name',
'test-remote-zone',
'fake_incident_id',
remote_instance_id='my-owned-instance-id',
all_volumes=True
)
volumes = aws_collector._FindVolumesToCopy()
self.assertEqual(2, len(volumes))
self.assertEqual('fake-boot-volume-id', volumes[0].volume_id)
self.assertEqual('fake-volume-id', volumes[1].volume_id)
mock_ListVolumes.assert_called_once()
# If a list of 1 volume ID is passed, that volume only should be returned
aws_collector.SetUp(
'test-remote-profile-name',
'test-remote-zone',
'fake_incident_id',
remote_instance_id='',
volume_ids=FAKE_VOLUME.volume_id
)
volumes = aws_collector._FindVolumesToCopy()
self.assertEqual(1, len(volumes))
self.assertEqual('fake-volume-id', volumes[0].volume_id)
mock_GetVolumeById.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1795705
|
<reponame>Brndan/decharges-sudeducation
from django.views.generic import TemplateView
from decharges.decharge.mixins import CheckConfigurationMixin, FederationRequiredMixin
from decharges.decharge.models import (
TempsDeDecharge,
UtilisationCreditDeTempsSyndicalPonctuel,
UtilisationTempsDecharge,
)
from decharges.decharge.views.utils import calcul_repartition_temps
from decharges.user_manager.models import Syndicat
class SyndicatsARelancer(
CheckConfigurationMixin, FederationRequiredMixin, TemplateView
):
template_name = "decharge/syndicats_a_relancer.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
annee_en_cours = self.params.annee_en_cours
context["annee"] = annee_en_cours
temps_de_decharge_mutualise = TempsDeDecharge.objects.filter(
annee=annee_en_cours
)
utilisation_temps_decharge = UtilisationTempsDecharge.objects.filter(
annee=annee_en_cours,
)
utilisation_cts = UtilisationCreditDeTempsSyndicalPonctuel.objects.filter(
annee=annee_en_cours,
)
syndicats_depassant_leur_quota = []
for syndicat in Syndicat.objects.all():
(
cts_consommes,
temps_decharge_federation,
temps_donnes,
temps_donnes_total,
temps_recus_par_des_syndicats,
temps_recus_par_la_federation,
temps_restant,
temps_utilises,
temps_utilises_total,
) = calcul_repartition_temps(annee_en_cours, self.federation, syndicat)
if temps_restant < 0:
syndicats_depassant_leur_quota.append((syndicat, abs(temps_restant)))
context["syndicats_n_ayant_rien_rempli"] = (
Syndicat.objects.exclude(pk=self.federation.pk)
.exclude(temps_de_decharges_donnes__in=temps_de_decharge_mutualise)
.exclude(
utilisation_temps_de_decharges_par_annee__in=utilisation_temps_decharge
)
.exclude(utilisation_cts_ponctuels_par_annee__in=utilisation_cts)
.order_by("username")
)
context["syndicats_depassant_leur_quota"] = syndicats_depassant_leur_quota
return context
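# ---------------------------------------------------------------------------
# URL wiring sketch (hypothetical urls.py entry; the route and name below are
# assumptions, not taken from the project):
#     from django.urls import path
#     urlpatterns = [
#         path("syndicats-a-relancer/", SyndicatsARelancer.as_view(),
#              name="syndicats_a_relancer"),
#     ]
# ---------------------------------------------------------------------------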
|
StarcoderdataPython
|
6660528
|
<filename>hamiltonian/manager.py<gh_stars>0
# Copyright (c) 2020 <NAME>
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Nodes Managers
"""
import abc
import math
import numpy as np
from collections import defaultdict
from .render import animate
class Manager(object):
"""
A manager class adds nodes or links and refreshes their positions.
"""
def __init__(self, callback=None, **kwargs):
self._callback = callback
self._needs_refresh = False
def start(self, **kwargs):
"""
Start animating
"""
animate(self.render_callback, **kwargs)
def refresh(self):
"""
        Function to ask for a refresh of the nodes' destinations
"""
self._needs_refresh = True
def render_callback(self, render):
"""
Internal function called on each frame
"""
if self._callback:
self._callback(self)
if self._needs_refresh:
# We only refresh the destination positions if necessary
self.update(render)
self._needs_refresh = False
@abc.abstractmethod
def update(self, render, **kwargs):
"""
Function called by the graphical thread to change the current schema.
        This is supposed to be overridden by a manager subclass, using:
- render.get_node
- render.add_node
- render.remove_node
- render.add_link
- render.remove_link
"""
pass
### HUB MANAGER ###
def _get_circle_locs(r, n, phi=0):
"""
Get the List of n desired locations in the circle of radius r
"""
tht = 2 * math.pi / n
return [
np.array((r * math.cos(phi + tht * i),
r * math.sin(phi + tht * i)))
for i in range(n)
]
def _get_next_hub_pos(hubs):
"""
    Get the position of the center of the next hub, based on the existing ones
"""
nb = len(hubs.keys())
return np.array((nb % 2, nb * 2))
class HubManager(Manager):
"""
Order Nodes in hubs
"""
def __init__(self, callback=None, radius=1.):
self.objects = defaultdict(list)
self.hubs = {}
self.radius = radius
self.new_points = {}
super(HubManager, self).__init__(callback)
def get_rad_and_phi(self, layer):
"""
Internal function to get the radius and angles of Nodes
around another one.
"""
rd = self.radius / (2 ** layer)
phi = math.pi / 4 if ((1 + layer) % 2) else 0
return rd, phi
def add_hub(self, name, pos=None, **kwargs):
"""
Add a standalone hub
:param name: the hub's name
:param pos: the hub's position
"""
self.objects[name] = []
if pos is None:
pos = _get_next_hub_pos(self.hubs)
self.hubs[name] = pos
self.new_points[name] = (None, kwargs)
self.refresh()
def add_point(self, name, under, **kwargs):
"""
Add a point linked to another one
:param name: the point's name
:param under: the parent's name
"""
self.objects[name] = []
self.objects[under].append(name)
self.new_points[name] = (under, kwargs)
self.refresh()
def update(self, render, cur=None, center=None, i=0):
"""
Internal function used to recalculate all destinations
"""
if cur is None:
# Entry: iterate through hubs
for hub, pos in self.hubs.items():
if hub in self.new_points:
kwargs = self.new_points.pop(hub)[1]
render.add_node(hub, pos, **kwargs)
self.update(render, cur=hub, center=pos, i=0)
return
subs = self.objects[cur]
if not subs:
# Node has no child
return
rd, phi = self.get_rad_and_phi(i)
poss = _get_circle_locs(rd, len(subs), phi)
for i, name in enumerate(subs):
pos = center + poss[i]
# Check for new point
if name in self.new_points:
# Create point at desired location
under, kwargs = self.new_points.pop(name)
node = render.add_node(name, pos, **kwargs)
if under:
# Link to upper point
render.add_link(render.get_node(under), node, **kwargs)
else:
# Update node destination
node = render.get_node(name)
node.set_destination(pos)
if self.objects[name]:
self.update(render, cur=name, center=pos, i=i+1)
|
StarcoderdataPython
|
3592005
|
import os
import uuid
from datetime import datetime, timedelta
import mock
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from utils.widget import quill
from wiki.forms import wikipageform
from wiki.models import wikipage, wikisection
from wiki.models.permissionpage import PermissionPage
from wiki.models.permissionsection import PermissionSection
from wiki.models.wikipage import Keywords, WikiPage
from wiki.models.wikisection import WikiSection
def render_mock(request, template, data, content_type='test'):
return {'request':request, 'template':template, 'data': data, 'content_type':content_type}
def redirect_mock(link):
return link
def reverse_mock(link, kwargs=None):
if kwargs is None:
return link
return link
class req:
def __init__(self, method='GET', post={}, user=None):
self.method = method
self.user = user
self.POST = post
class WikiPageFormTestCase(TestCase):
def setUp(self):
self.firstUser = User(is_superuser=True, username='test1', password='<PASSWORD>', email='<EMAIL>', first_name='testname1', last_name='testlast2')
self.secondUser = User(is_superuser=False, username='test2', password='<PASSWORD>', email='<EMAIL>', first_name='testname2', last_name='testlast2')
self.thirdUser = User(is_superuser=False, username='test3', password='<PASSWORD>', email='<EMAIL>', first_name='testname3', last_name='testlast3')
self.fourthUser = User(is_superuser=False, username='test4', password='<PASSWORD>', email='<EMAIL>', first_name='testname4', last_name='testlast4')
self.firstUser.save()
self.secondUser.save()
self.thirdUser.save()
self.fourthUser.save()
self.wikiuuid = [uuid.uuid4(), uuid.uuid4(), uuid.uuid4(), uuid.uuid4()]
self.wikistext = ['{"ops":[{"insert":"123123\\n"}]}', 'text', None]
self.wikisuuid = [uuid.uuid4(), uuid.uuid4(), uuid.uuid4(), uuid.uuid4(), uuid.uuid4()]
self.wikipath = 'wiki'
self.wikipagelink = 'wiki_page'
self.wikimainpagelink = 'wiki_homepage'
self.softwarename = 'name'
self.formtemplate = 'forms/unimodelform.html'
self.contenttype = 'text/html'
self.createdtime = datetime.now(pytz.utc)
self.wikiPages = []
self.permissions = []
for i in range(3):
self.wikiPages.append(WikiPage(unid=self.wikiuuid[i], createdon=self.createdtime, updatedon=self.createdtime, createdby=self.firstUser, updatedby=self.secondUser, title='testpage'+str(i+1)))
self.wikiPages[i].save()
self.wikiPages[i].createdon=self.createdtime + timedelta(hours=i)
self.wikiPages[i].updatedon=self.createdtime + timedelta(hours=i)
self.wikiPages[i].save()
self.pagepermissions = []
pagep = PermissionPage(createdby=self.firstUser, accesslevel=20, grantedto=self.thirdUser, wikipage=self.wikiPages[2])
pagep.save()
self.pagepermissions.append(pagep)
pagep = PermissionPage(createdby=self.firstUser, accesslevel=30, grantedto=self.fourthUser, wikipage=self.wikiPages[2])
pagep.save()
self.pagepermissions.append(pagep)
self.wikiSections = []
for i in range(3):
self.wikiSections.append(WikiSection(unid=self.wikisuuid[i], createdon=self.createdtime, updatedon=self.createdtime, createdby=self.firstUser, updatedby=self.secondUser, title='testsec'+str(i+1), pageorder=i+1, text=self.wikistext[i], wikipage=self.wikiPages[0]))
self.wikiSections[i].save()
self.wikiSections[i].createdon=self.createdtime + timedelta(hours=i)
self.wikiSections[i].updatedon=self.createdtime + timedelta(hours=i)
perm = PermissionSection(createdby=self.firstUser, accesslevel=20, grantedto=self.secondUser, section=self.wikiSections[i])
perm.save()
self.permissions.append(perm)
perm = PermissionSection(createdby=self.firstUser, accesslevel=10, grantedto=self.thirdUser, section=self.wikiSections[i])
perm.save()
self.permissions.append(perm)
if i==1:
self.wikiSections[1].createdby = None
self.wikiSections[1].updatedby = None
self.wikiSections[i].save()
settings.SOFTWARE_NAME_SHORT = self.softwarename
wikipageform.settings.SOFTWARE_NAME_SHORT = self.softwarename
wikipageform.settings.WIKI_FILES = self.wikipath
os.path.exists = mock.Mock(return_value=True, spec='os.path.exists')
os.makedirs = mock.Mock(return_value=None, spec='os.makedirs')
wikipageform.render = mock.Mock(side_effect=render_mock)
wikipageform.redirect = mock.Mock(side_effect=redirect_mock)
wikipageform.reverse = mock.Mock(side_effect=reverse_mock)
wikipage.reverse = mock.Mock(side_effect=reverse_mock)
def test_wiki_page_form_get_request_super_user(self):
post = {'action':'add'}
method = 'GET'
request = req(method=method, user=self.firstUser)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result['request'], request)
self.assertEqual(result['template'], self.formtemplate)
data = result['data']
self.assertEqual(data['action'], 'add')
self.assertEqual(data['PAGE_TITLE'], 'Post an article: ' + self.softwarename)
self.assertEqual(data['minititle'], 'Post Article')
self.assertEqual(data['submbutton'], 'Post article')
self.assertEqual(data['backurl'], self.wikimainpagelink)
self.assertEqual(data['needquillinput'], True)
self.assertIsInstance(data['form'], wikipageform.WikiPageForm)
self.assertEqual(result['content_type'], self.contenttype)
def test_wiki_page_form_get_request_no_access(self):
method = 'GET'
request = req(method=method, user=self.thirdUser)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_get_request_no_permissions(self):
method = 'GET'
request = req(method=method, user=self.fourthUser)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_post_request_no_action(self):
post = {}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result['request'], request)
self.assertEqual(result['template'], self.formtemplate)
data = result['data']
self.assertEqual(data['action'], 'add')
self.assertEqual(data['PAGE_TITLE'], 'Post an article: ' + self.softwarename)
self.assertEqual(data['minititle'], 'Post Article')
self.assertEqual(data['submbutton'], 'Post article')
self.assertEqual(data['backurl'], self.wikimainpagelink)
self.assertEqual(data['needquillinput'], True)
self.assertIsInstance(data['form'], wikipageform.WikiPageForm)
self.assertEqual(result['content_type'], self.contenttype)
def test_wiki_page_form_post_request_no_action_no_permissions(self):
post = {}
method = 'POST'
request = req(method=method, user=self.thirdUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_add_request_success(self):
post = {'action':'add', 'title':self.wikiPages[0].title}
method = 'POST'
WikiPage.objects.all().delete()
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikipagelink)
self.assertEqual(len(WikiPage.objects.all()), 1)
wiki = WikiPage.objects.all()[0]
self.assertEqual(wiki.title, self.wikiPages[0].title)
self.assertEqual(wiki.createdby, self.firstUser)
self.assertEqual(wiki.updatedby, self.firstUser)
def test_wiki_page_form_add_request_failed_no_access(self):
post = {'action':'add', 'title':self.wikiPages[0].title}
method = 'POST'
request = req(method=method, user=self.thirdUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_add_request_failed_no_permissions(self):
post = {'action':'add', 'title':self.wikiPages[0].title}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_add_request_failed_no_title(self):
post = {'action':'add', 'title':None}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result['request'], request)
self.assertEqual(result['template'], self.formtemplate)
data = result['data']
self.assertEqual(data['action'], 'add')
self.assertEqual(data['PAGE_TITLE'], 'Post an article: ' + self.softwarename)
self.assertEqual(data['minititle'], 'Post Article')
self.assertEqual(data['submbutton'], 'Post article')
self.assertEqual(data['backurl'], self.wikimainpagelink)
self.assertEqual(data['needquillinput'], True)
self.assertIsInstance(data['form'], wikipageform.WikiPageForm)
self.assertTrue(('title' in data['form'].data) or (data['form'].data == {}))
self.assertEqual(result['content_type'], self.contenttype)
def test_wiki_page_form_change_request_success_super_user(self):
post = {'action':'change', 'targetid': self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result['request'], request)
self.assertEqual(result['template'], self.formtemplate)
data = result['data']
self.assertEqual(data['action'], 'changed')
self.assertEqual(data['targetid'], self.wikiPages[0].unid)
self.assertEqual(data['PAGE_TITLE'], 'Post an article: ' + self.softwarename)
self.assertEqual(data['minititle'], 'Change Posted Article')
self.assertEqual(data['submbutton'], 'Change posted article')
self.assertEqual(data['deletebutton'], 'Delete article')
self.assertEqual(data['backurl'], self.wikipagelink)
self.assertEqual(data['needquillinput'], True)
self.assertIsInstance(data['form'], wikipageform.WikiPageForm)
self.assertTrue('title' in data['form'].initial)
self.assertEqual(data['form'].initial['title'], self.wikiPages[0].title)
self.assertEqual(result['content_type'], self.contenttype)
def test_wiki_page_form_change_request_success_permission(self):
post = {'action':'change', 'targetid': self.wikiPages[2].unid}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result['request'], request)
self.assertEqual(result['template'], self.formtemplate)
data = result['data']
self.assertEqual(data['action'], 'changed')
self.assertEqual(data['targetid'], self.wikiPages[2].unid)
self.assertEqual(data['PAGE_TITLE'], 'Post an article: ' + self.softwarename)
self.assertEqual(data['minititle'], 'Change Posted Article')
self.assertEqual(data['submbutton'], 'Change posted article')
self.assertEqual(data['deletebutton'], 'Delete article')
self.assertEqual(data['backurl'], self.wikipagelink)
self.assertEqual(data['needquillinput'], True)
self.assertIsInstance(data['form'], wikipageform.WikiPageForm)
self.assertTrue('title' in data['form'].initial)
self.assertEqual(data['form'].initial['title'], self.wikiPages[2].title)
self.assertEqual(result['content_type'], self.contenttype)
def test_wiki_page_form_change_request_fail_no_access(self):
post = {'action':'change', 'targetid': self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.thirdUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_change_request_fail_no_permissions(self):
post = {'action':'change', 'targetid': self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_change_request_fail_no_page(self):
post = {'action':'change', 'targetid':uuid.uuid4()}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_change_request_fail_no_target_id(self):
post = {'action':'change'}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_changed_request_success_super_user(self):
post = {'action':'changed', 'targetid': self.wikiPages[0].unid, 'title':'new title'}
method = 'POST'
self.wikiPages[0].updatedby = self.secondUser
self.wikiPages[0].save()
wiki = WikiPage.objects.get(unid=self.wikiPages[0].unid)
self.assertEqual(wiki.updatedby, self.secondUser)
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikipagelink)
wiki = WikiPage.objects.get(unid=self.wikiPages[0].unid)
self.assertEqual(wiki.title, 'new title')
self.assertEqual(wiki.createdby, self.firstUser)
self.assertEqual(wiki.updatedby, self.firstUser)
self.assertNotEqual(wiki.updatedon, wiki.createdon)
def test_wiki_page_form_changed_request_success_permissions(self):
post = {'action':'changed', 'targetid': self.wikiPages[2].unid, 'title':'new title'}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikipagelink)
wiki = WikiPage.objects.get(unid=self.wikiPages[2].unid)
self.assertEqual(wiki.title, 'new title')
self.assertEqual(wiki.createdby, self.firstUser)
self.assertEqual(wiki.updatedby, self.fourthUser)
self.assertNotEqual(wiki.updatedon, wiki.createdon)
def test_wiki_page_form_changed_request_failed_no_access(self):
post = {'action':'changed', 'targetid': self.wikiPages[0].unid, 'title':'new title'}
method = 'POST'
request = req(method=method, user=self.thirdUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_changed_request_failed_no_permissions(self):
post = {'action':'changed', 'targetid': self.wikiPages[0].unid, 'title':'new title'}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_changed_request_failed_no_title(self):
post = {'action':'changed', 'targetid': self.wikiPages[0].unid, 'title': None}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result['request'], request)
self.assertEqual(result['template'], self.formtemplate)
data = result['data']
self.assertEqual(data['action'], 'changed')
self.assertEqual(data['targetid'], self.wikiPages[0].unid)
self.assertEqual(data['PAGE_TITLE'], 'Post an article: ' + self.softwarename)
self.assertEqual(data['minititle'], 'Change Posted Article')
self.assertEqual(data['submbutton'], 'Change posted article')
self.assertEqual(data['deletebutton'], 'Delete article')
self.assertEqual(data['backurl'], self.wikipagelink)
self.assertEqual(data['needquillinput'], True)
self.assertIsInstance(data['form'], wikipageform.WikiPageForm)
self.assertTrue('title' in data['form'].initial)
self.assertEqual(data['form'].initial['title'], self.wikiPages[0].title)
self.assertEqual(result['content_type'], self.contenttype)
def test_wiki_page_form_changed_request_fail_no_page(self):
post = {'action':'changed', 'targetid':uuid.uuid4()}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_changed_request_fail_no_target_id(self):
post = {'action':'changed'}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_delete_request_success_super_user(self):
post = {'action':'delete', 'targetid':self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
try:
wiki = WikiPage.objects.get(unid=self.wikiPages[0].unid)
        except WikiPage.DoesNotExist:
            wiki = None
self.assertIsNone(wiki)
def test_wiki_page_form_delete_request_success_permission(self):
post = {'action':'delete', 'targetid':self.wikiPages[2].unid}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
try:
wiki = WikiPage.objects.get(unid=self.wikiPages[2].unid)
        except WikiPage.DoesNotExist:
            wiki = None
self.assertIsNone(wiki)
def test_wiki_page_form_delete_request_fail_wrong_action(self):
post = {'action':'qwertyu', 'targetid':self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_delete_request_fail_no_access(self):
post = {'action':'delete', 'targetid':self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.thirdUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_delete_request_fail_no_permissions(self):
post = {'action':'delete', 'targetid':self.wikiPages[0].unid}
method = 'POST'
request = req(method=method, user=self.fourthUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_delete_request_fail_no_page(self):
post = {'action':'delete', 'targetid':uuid.uuid4()}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_form_delete_request_fail_no_target_id(self):
post = {'action':'delete'}
method = 'POST'
request = req(method=method, user=self.firstUser, post=post)
result = wikipageform.WikiArticleFormParse(request)
self.assertEqual(result, self.wikimainpagelink)
def test_wiki_page_viewable_super_user(self):
self.assertTrue(self.wikiPages[0].viewable(self.firstUser))
def test_wiki_page_viewable_permission_editable(self):
self.assertTrue(self.wikiPages[0].viewable(self.secondUser))
def test_wiki_page_viewable_permission_viewable(self):
self.assertTrue(self.wikiPages[0].viewable(self.thirdUser))
def test_wiki_page_not_viewable_permission(self):
self.assertFalse(self.wikiPages[0].viewable(self.fourthUser))
def test_wiki_page_viewable_common_knowledge(self):
self.wikiPages[0].commonknowledge = True
self.wikiPages[0].save()
self.assertTrue(self.wikiPages[0].viewable(self.fourthUser))
|
StarcoderdataPython
|
6668069
|
from typing import Dict, List
from src.controller import fields
import src.model.person
class AddPersons(fields.AddMovieFieldBaseClass):
""" Action to add list of persons
Kludgy to use fields controller as base class, but given
time constraints it'll do.
"""
def execute(self, person_names: List[str]):
self.execute_add(person_names, src.model.person.Person, 'person')
class PersonIndexLookup(fields.MovieFieldIndexLookup):
""" Looks up person id by name """
def query(self) -> Dict[str, int]:
return self.query_index_lookup(src.model.person.Person)
|
StarcoderdataPython
|
6466897
|
<gh_stars>0
import networkx as network
import matplotlib.pyplot as gestor
def dibujar(grafo):
grafico = network.DiGraph()
for index in grafo.getElementos():
vertice = grafo.obtener(index)
grafico.add_node(vertice.getId(), nombre=vertice.getNombre())
for arista in vertice.getConectados():
grafico.add_edge(index, arista)
    network.draw(grafico)
    gestor.savefig("grafico.png")  # save before show(); show() clears the current figure
    gestor.show()
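# ---------------------------------------------------------------------------
# Sketch of the graph interface dibujar() expects (method names inferred from
# the calls above; the stub classes themselves are hypothetical):
#     class Vertice:
#         def __init__(self, id_, nombre, conectados):
#             self._id, self._nombre, self._conectados = id_, nombre, conectados
#         def getId(self): return self._id
#         def getNombre(self): return self._nombre
#         def getConectados(self): return self._conectados
#     class Grafo:
#         def __init__(self, vertices): self._vertices = vertices
#         def getElementos(self): return list(self._vertices)
#         def obtener(self, indice): return self._vertices[indice]
#     dibujar(Grafo({0: Vertice(0, "A", [1]), 1: Vertice(1, "B", [])}))
# ---------------------------------------------------------------------------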
|
StarcoderdataPython
|
1952045
|
from django import forms
class BootstrapFormMixin:
fields = {}
def _init_bootstrap_form_controls(self):
for _, field in self.fields.items():
if not hasattr(field.widget, 'attrs'):
setattr(field.widget, 'attrs', {})
if 'class' not in field.widget.attrs:
field.widget.attrs['class'] = ''
field.widget.attrs['class'] += ' form-control'
class DisabledFieldsFormMixin:
disabled_fields = '__all__'
fields = {}
def _init_disabled_fields(self):
for name, field in self.fields.items():
if self.disabled_fields != '__all__' and name not in self.disabled_fields:
continue
if not hasattr(field.widget, 'attrs'):
setattr(field.widget, 'attrs', {})
if isinstance(field, forms.ChoiceField):
field.widget.attrs['disabled'] = 'readonly'
else:
field.widget.attrs['readonly'] = 'readonly'
|
StarcoderdataPython
|
3425409
|
#! /usr/bin/env python
"""
Author: <NAME>
Date:
graph_helper, plotting output of the network
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from termcolor import colored
from scipy.stats import gaussian_kde
import pandas as pd
from copy import deepcopy
note_str = colored("NOTE: ", 'blue')
warn_str = colored("WARNING: ", 'red')
def selectunits(data, units=32, is_bilstm = False):
"""Select random number of unit in data
"""
data = [data[0]]
print(np.shape(data))
batch_num, max_timesteps, units_all = np.shape(data)
print(np.shape(data))
np.random.seed(0)
res = np.zeros((1, max_timesteps, units))
if is_bilstm == True:
        # note: this branch still has a bug
l = list(range(units//2))
np.random.shuffle(l)
sub_data1 = data[:, 0:units//2]
sub_data2 = data[:, units//2:]
res1 = sub_data1[:, l]
res2 = sub_data2[:, l]
res = np.concatenate((res1, res2), axis = 1)
else:
l = np.random.choice(units_all, units)
for i in range(len(data)):
res[i] = data[i][:, l]
return res
def show_scatter_density(data, units):
"""Draw th scatter density of the neuron
Argument:
data: the shape should be [max_timesteps, units]
"""
data = data[0]
print(np.shape(data))
fig, ax = plt.subplots()
for i in range(units):
z = gaussian_kde(data[:, i])(data[:, i])
x = [i+1] * data.shape[0]
a = ax.scatter(x, data[:, i], c=z, s=100, edgecolor='')
plt.colorbar(a)
plt.xlabel('Selected units')
plt.ylabel('Activation')
plt.show()
def show_box_plot(data, units):
data = data[0]
df = pd.DataFrame(data)
boxplot = df.boxplot()
def show_features_0D(data, marker='o', cmap='bwr', color=None, **kwargs):
"""Plots 0D aligned scatterplots in a standalone graph.
iter == list/tuple (both work)
Arguments:
data: np.ndarray, 2D: (samples, channels).
marker: str. Pyplot kwarg specifying scatter plot marker shape.
cmap: str. Pyplot cmap (colormap) kwarg for the heatmap. Overridden
by `color`!=None.
color: (float iter) iter / str / str iter / None. Pyplot kwarg,
specifying marker colors in order of drawing. If str/ float iter,
draws all curves in one color. Overrides `cmap`. If None,
automatically colors along equally spaced `cmap` gradient intervals.
Ex: ['red', 'blue']; [[0., .8, 1.], [.2, .5, 0.]] (RGB)
kwargs:
scale_width: float. Scale width of resulting plot by a factor.
scale_height: float. Scale height of resulting plot by a factor.
show_borders: bool. If True, shows boxes around plot(s).
title_mode: bool/str. If True, shows generic supertitle.
If str in ('grads', 'outputs'), shows supertitle tailored to
`data` dim (2D/3D). If other str, shows `title_mode` as supertitle.
If False, no title is shown.
show_y_zero: bool. If True, draws y=0.
title_fontsize: int. Title fontsize.
channel_axis: int. `data` axis holding channels/features. -1 = last axis.
markersize: int/int iter. Pyplot kwarg `s` specifying marker size(s).
markerwidth: int. Pyplot kwarg `linewidth` specifying marker thickness.
ylims: str ('auto'); float list/tuple. Plot y-limits; if 'auto',
sets both lims to max of abs(`data`) (such that y=0 is centered).
"""
scale_width = kwargs.get('scale_width', 1)
scale_height = kwargs.get('scale_height', 1)
show_borders = kwargs.get('show_borders', False)
title_mode = kwargs.get('title_mode', 'outputs')
show_y_zero = kwargs.get('show_y_zero', True)
title_fontsize = kwargs.get('title_fontsize', 14)
markersize = kwargs.get('markersize', 15)
markerwidth = kwargs.get('markerwidth', 2)
ylims = kwargs.get('ylims', 'auto')
def _catch_unknown_kwargs(kwargs):
allowed_kwargs = ('scale_width', 'scale_height', 'show_borders',
'title_mode', 'show_y_zero', 'title_fontsize',
'channel_axis', 'markersize', 'markerwidth', 'ylims')
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise Exception("unknown kwarg `%s`" % kwarg)
def _get_title(data, title_mode):
feature = "Context-feature"
context = "Context-units"
if title_mode in ['grads', 'outputs']:
feature = "Gradients" if title_mode=='grads' else "Outputs"
context = "Timesteps"
return "(%s vs. %s) vs. Channels" % (feature, context)
_catch_unknown_kwargs(kwargs)
if len(data.shape)!=2:
raise Exception("`data` must be 2D")
if color is None:
cmap = cm.get_cmap(cmap)
cmap_grad = np.linspace(0, 256, len(data[0])).astype('int32')
color = cmap(cmap_grad)
color = np.vstack([color] * data.shape[0])
x = np.ones(data.shape) * np.expand_dims(np.arange(1, len(data) + 1), -1)
if show_y_zero:
plt.axhline(0, color='k', linewidth=1)
plt.scatter(x.flatten(), data.flatten(), marker=marker,
s=markersize, linewidth=markerwidth, color=color)
plt.gca().set_xticks(np.arange(1, len(data) + 1), minor=True)
plt.gca().tick_params(which='minor', length=4)
if ylims == 'auto':
ymax = np.max(np.abs(data))
ymin = -ymax
else:
ymin, ymax = ylims
    plt.gca().set_ylim(ymin, ymax)
if title_mode:
title = _get_title(data, title_mode)
plt.title(title, weight='bold', fontsize=title_fontsize)
if not show_borders:
plt.box(None)
plt.gcf().set_size_inches(12*scale_width, 4*scale_height)
plt.show()
def show_features_2D(data, n_rows=None, norm=None, cmap='bwr', reflect_half=False,
timesteps_xaxis=True, max_timesteps=None, **kwargs):
"""Plots 2D heatmaps in a standalone graph or subplot grid.
iter == list/tuple (both work)
Arguments:
data: np.ndarray, 2D/3D. Data to plot.
2D -> standalone graph; 3D -> subplot grid.
3D: (samples, timesteps, channels)
2D: (timesteps, channels)
n_rows: int/None. Number of rows in subplot grid. If None,
determines automatically, closest to n_rows == n_cols.
norm: float iter. Normalizes colors to range between norm==(vmin, vmax),
according to `cmap`. Ex: `cmap`='bwr' ('blue white red') -> all
values <=vmin and >=vmax will be shown as most intense blue and
red, and those exactly in-between are shown as white.
cmap: str. Pyplot cmap (colormap) kwarg for the heatmap.
reflect_half: bool. If True, second half of channels dim will be
flipped about the timesteps dim.
timesteps_xaxis: bool. If True, the timesteps dim (`data` dim 1)
               is plotted along the x-axis.
max_timesteps: int/None. Max number of timesteps to show per plot.
If None, keeps original.
kwargs:
scale_width: float. Scale width of resulting plot by a factor.
scale_height: float. Scale height of resulting plot by a factor.
show_borders: bool. If True, shows boxes around plot(s).
show_xy_ticks: int/bool iter. Slot 0 -> x, Slot 1 -> y.
Ex: [1, 1] -> show both x- and y-ticks (and their labels).
[0, 0] -> hide both.
show_colorbar: bool. If True, shows one colorbar next to plot(s).
title_mode: bool/str. If True, shows generic supertitle.
If str in ('grads', 'outputs'), shows supertitle tailored to
`data` dim (2D/3D). If other str, shows `title_mode` as supertitle.
If False, no title is shown.
title_fontsize: int. Title fontsize.
tight: bool. If True, plots compactly by removing subplot padding.
channel_axis: int, 0 or -1. `data` axis holding channels/features.
-1 --> (samples, timesteps, channels)
0 --> (channels, timesteps, samples)
borderwidth: float / None. Width of subplot borders.
dpi: int. Pyplot kwarg, 'dots per inch', specifying plot resolution
"""
scale_width = kwargs.get('scale_width', 1)
scale_height = kwargs.get('scale_height', 1)
show_borders = kwargs.get('show_borders', True)
show_xy_ticks = kwargs.get('show_xy_ticks', [True, True])
show_colorbar = kwargs.get('show_colorbar', False)
title_mode = kwargs.get('title_mode', 'outputs')
title_fontsize = kwargs.get('title_fontsize', 14)
tight = kwargs.get('tight', False)
channel_axis = kwargs.get('channel_axis', -1)
borderwidth = kwargs.get('borderwidth', None)
dpi = kwargs.get('dpi', 76)
def _catch_unknown_kwargs(kwargs):
allowed_kwargs = ('scale_width', 'scale_height', 'show_borders',
'show_xy_ticks', 'show_colorbar', 'title_mode',
'title_fontsize', 'channel_axis', 'tight',
'borderwidth', 'dpi')
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise Exception("unknown kwarg `%s`" % kwarg)
def _get_title(data, title_mode, timesteps_xaxis, vmin, vmax):
feature = "Context-feature"
context = "Context-units"
context_order = "(%s vs. Channels)" % context
extra_dim = ""
if title_mode in ['grads', 'outputs']:
feature = "Gradients" if title_mode=='grads' else "Outputs"
context = "Timesteps"
if timesteps_xaxis:
context_order = "(Channels vs. %s)" % context
if len(data.shape)==3:
extra_dim = ") vs. Samples"
context_order = "(" + context_order
norm_txt = "(%s, %s)" % (vmin, vmax) if (vmin is not None) else "auto"
return "{} vs. {}{} -- norm={}".format(context_order, feature,
extra_dim, norm_txt)
def _process_data(data, max_timesteps, reflect_half,
timesteps_xaxis, channel_axis):
if max_timesteps is not None:
data = data[..., :max_timesteps, :]
if reflect_half:
data = data.copy() # prevent passed array from changing
half_chs = data.shape[-1]//2
data[..., half_chs:] = np.flip(data[..., half_chs:], axis=0)
        if len(data.shape) != 3:
            data = np.expand_dims(data, 0)
        if timesteps_xaxis:
            data = np.transpose(data, (0, 2, 1))
return data
_catch_unknown_kwargs(kwargs)
if len(data.shape) not in (2, 3):
raise Exception("`data` must be 2D or 3D")
data = _process_data(data, max_timesteps, reflect_half,
timesteps_xaxis, channel_axis)
vmin, vmax = norm or (None, None)
n_subplots = len(data) if len(data.shape)==3 else 1
n_rows, n_cols = _get_nrows_and_ncols(n_rows, n_subplots)
fig, axes = plt.subplots(n_rows, n_cols, dpi=dpi, sharex=True, sharey=True)
axes = np.asarray(axes)
if title_mode:
title = _get_title(data, title_mode, timesteps_xaxis, vmin, vmax)
y = .93 + .12 * tight
plt.suptitle(title, weight='bold', fontsize=title_fontsize, y=y)
for ax_idx, ax in enumerate(axes.flat):
img = ax.imshow(data[ax_idx], cmap=cmap, vmin=vmin, vmax=vmax)
if not show_xy_ticks[0]:
ax.set_xticks([])
if not show_xy_ticks[1]:
ax.set_yticks([])
ax.axis('tight')
if not show_borders:
ax.set_frame_on(False)
if show_colorbar:
fig.colorbar(img, ax=axes.ravel().tolist())
if tight:
plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
if borderwidth is not None:
for ax in axes.flat:
[s.set_linewidth(borderwidth) for s in ax.spines.values()]
plt.gcf().set_size_inches(8*scale_width, 8*scale_height)
plt.show()
def _get_nrows_and_ncols(n_rows, n_subplots):
if n_rows is None:
n_rows = int(np.sqrt(n_subplots))
n_cols = max(int(n_subplots / n_rows), 1) # ensure n_cols != 0
n_rows = int(n_subplots / n_cols)
while not ((n_subplots / n_cols).is_integer() and
(n_subplots / n_rows).is_integer()):
n_cols -= 1
n_rows = int(n_subplots / n_cols)
return n_rows, n_cols
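if __name__ == '__main__':
    # Hedged demo (not part of the original module): exercise the helpers on
    # random data shaped (samples, timesteps, units) just to illustrate the
    # expected input shapes; the numbers are arbitrary placeholders.
    demo = np.random.randn(4, 50, 64)
    picked = selectunits(demo, units=32)       # -> shape (1, 50, 32)
    show_features_0D(np.random.randn(8, 16))   # 2D input: (samples, channels)
    show_features_2D(picked[0], n_rows=1)      # 2D input: (timesteps, channels)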
|
StarcoderdataPython
|
12825160
|
import unittest
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon, Point
import streetmapper
class TestJoinBldgsBlocks(unittest.TestCase):
def setUp(self):
self.blocks = gpd.GeoDataFrame(
{'block_uid': [1, 2]},
geometry=[
Polygon(((0, -1), (0, 1), (1, 1), (1, -1))), # top side
Polygon(((0, -1), (0, 1), (-1, 1), (-1, -1))) # bottom side
]
)
def testNoBldgs(self):
bldgs = gpd.GeoDataFrame()
blocks = self.blocks
matches, multimatches, nonmatches =\
streetmapper.pipeline.join_bldgs_blocks(bldgs, blocks, 'bldg_uid', 'block_uid')
self.assertEqual(len(matches), 0)
self.assertEqual(len(multimatches), 0)
self.assertEqual(len(nonmatches), 0)
def testFullyUnivariateMatch(self):
bldgs = gpd.GeoDataFrame(
{'bldg_uid': [1, 2, 3, 4]},
geometry=[
Polygon(((0, 0), (0, 1), (1, 1), (1, 0))).buffer(-0.01), # top right
Polygon(((0, 0), (0, -1), (1, -1), (1, 0))).buffer(-0.01), # top left
Polygon(((0, 0), (0, 1), (-1, 1), (-1, 0))).buffer(-0.01), # bottom right
Polygon(((0, 0), (0, -1), (-1, -1), (-1, 0))).buffer(-0.01) # bottom left
]
)
blocks = self.blocks
matches, multimatches, nonmatches =\
streetmapper.pipeline.join_bldgs_blocks(bldgs, blocks, 'bldg_uid', 'block_uid')
self.assertEqual(len(matches), 4)
self.assertEqual(len(multimatches), 0)
self.assertEqual(len(nonmatches), 0)
def testAllKindsOfMatches(self):
bldgs = gpd.GeoDataFrame(
{'bldg_uid': [1, 2, 3]},
geometry=[
Polygon(((0, 0), (0, 1), (1, 1), (1, 0))).buffer(-0.01), # top right, interior
Polygon(((-1, 0), (1, 0), (1, -1), (-1, -1))).buffer(-0.01), # bottom, spanning
Polygon(((10, 10), (10, 11), (11, 11), (11, 10))) # exterior
]
)
blocks = self.blocks
matches, multimatches, nonmatches =\
streetmapper.pipeline.join_bldgs_blocks(bldgs, blocks, 'bldg_uid', 'block_uid')
self.assertEqual(len(matches), 1)
self.assertEqual(len(multimatches), 2)
self.assertEqual(len(nonmatches), 1)
class TestBldgsOnBlock(unittest.TestCase):
def setUp(self):
self.block = Polygon(((0, 0), (0, 2), (2, 2), (2, 0)))
def testSimple(self):
bldgs = gpd.GeoDataFrame(geometry=[
Polygon(((0, 0), (0, 1), (1, 1), (1, 0))), # in
Polygon(((10, 10), (10, 11), (11, 11), (11, 10))) # out
])
result = streetmapper.pipeline.bldgs_on_block(bldgs, self.block)
assert len(result) == 1
    def testMultimatchOff(self):
bldgs = gpd.GeoDataFrame(geometry=[
Polygon(((0, 0), (0, 1), (1, 1), (1, 0))), # in
Polygon(((1, 1), (5, 1), (5, 5), (1, 5))) # through
])
result = streetmapper.pipeline.bldgs_on_block(bldgs, self.block, include_multimatches=False)
assert len(result) == 1
|
StarcoderdataPython
|
1849601
|
<reponame>ggabriel96/mapnames
import string as py_string  # stdlib string, aliased so it is not shadowed below
import unittest as ut
from mapnames import string
class EditDistanceTest(ut.TestCase):
    def test_identity(self):
        for i in range(len(py_string.digits)):
            id = py_string.digits[:i]
            self.assertEqual(string.wagner_fischer(id, id), 0)
    def test_one_empty(self):
        for i in range(len(py_string.digits)):
            s = py_string.digits[:i]
            self.assertEqual(string.wagner_fischer('', s), i)
            self.assertEqual(string.wagner_fischer(s, ''), i)
    def test_ascending(self):
        for i in range(len(py_string.digits)):
            s1 = py_string.digits[:i]
            for j in range(len(py_string.digits)):
                s2 = py_string.digits[:j]
                self.assertEqual(string.wagner_fischer(s1, s2), abs(i - j))
    def test_misc(self):
        self.assertEqual(string.wagner_fischer('abc', 'b'), 2)
        self.assertEqual(string.wagner_fischer('abc', 'ac'), 1)
        self.assertEqual(string.wagner_fischer('abc', 'bc'), 1)
        self.assertEqual(string.wagner_fischer('abcdefg', 'abce'), 3)
        # Different from last case because of symmetric 'd'
        self.assertEqual(string.wagner_fischer('abcdefg', 'gfedcba'), 6)
        # '0123456789' and '9876543210'
        self.assertEqual(
            string.wagner_fischer(py_string.digits, py_string.digits[::-1]),
            len(py_string.digits))
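# Illustrative reference only (added, not part of mapnames): a classic
# dynamic-programming Wagner-Fischer edit distance that produces the values the
# tests above expect; the library's own implementation may differ internally.
def _reference_wagner_fischer(s1, s2):
    prev = list(range(len(s2) + 1))
    for i in range(1, len(s1) + 1):
        curr = [i] + [0] * len(s2)
        for j in range(1, len(s2) + 1):
            cost = 0 if s1[i - 1] == s2[j - 1] else 1
            curr[j] = min(prev[j] + 1,         # deletion
                          curr[j - 1] + 1,     # insertion
                          prev[j - 1] + cost)  # substitution
        prev = curr
    return prev[-1]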
if __name__ == '__main__':
ut.main()
|
StarcoderdataPython
|
4868526
|
"""Runs integration test on the bot
"""
import os
import unittest
import re
from tgintegration import BotIntegrationClient
from karmabot.responses import START_BOT_RESPONSE, SUCCESSFUL_CLEAR_CHAT, SHOW_KARMA_NO_HISTORY_RESPONSE
from karmabot.commands_strings import START_COMMAND, CLEAR_CHAT_COMMAND, SHOW_KARMA_COMMAND, USER_INFO_COMMAND, CHAT_INFO_COMMAND, SHOW_KARMA_KEYBOARD_COMMAND
class IntegrationTests(unittest.TestCase):
""" Runs intergation tests"""
def setUp(self):
""" Sets up the environment"""
API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
TEST_BOT_NAME = os.environ.get("TEST_BOT_NAME")
if None in [API_HASH, API_ID, TEST_BOT_NAME]:
print("API_ID, API_HASH, TEST_BOT_NAME not set")
raise ValueError()
self.TEST_BOT_NAME = TEST_BOT_NAME
client = BotIntegrationClient(
bot_under_test=TEST_BOT_NAME,
session_name='./session/my_account', # Arbitrary file path to the Pyrogram session file
api_id=API_ID, # See "Requirements" above, ...
api_hash=API_HASH, # alternatively use a `config.ini` file
max_wait_response=15, # Maximum timeout for bot responses
min_wait_consecutive=2 # Minimum time to wait for consecutive messages
)
client.start()
#client.clear_chat()
self.client = client
def tearDown(self):
self.client.stop()
#pass
@unittest.skip("dont care that this works for now")
def test_start(self):
""" Test start command"""
response = self.client.send_command_await(START_COMMAND, num_expected=1)
self.assertEqual(len(response.messages), 1)
self.assertEqual(response.messages[0].text, START_BOT_RESPONSE)
@unittest.skip("dont care that this works for now")
def test_showkarma_works_on_empty_chat(self):
"""Clears the chat and tests that showkarma doesn't give a response"""
clear_chat_response = self.client.send_command_await(CLEAR_CHAT_COMMAND, num_expected=1)
self.assertEqual(len(clear_chat_response.messages), 1)
self.assertEqual(clear_chat_response.messages[0].text, SUCCESSFUL_CLEAR_CHAT)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
self.assertEqual(show_karma_response.messages[0].text, SHOW_KARMA_NO_HISTORY_RESPONSE)
@unittest.skip("vote overriding broken right now")
def test_votes_can_be_overriden(self):
"""Tests that if a message is +1 and then -1, the total net karma is 0"""
self.client.send_command_await(CLEAR_CHAT_COMMAND, num_expected=1)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
message = show_karma_response.messages[0]
chat_id = message.chat.id
message_id = message.message_id
self.client.send_message(chat_id, "+1", reply_to_message_id=message_id)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
bot_response = show_karma_response.messages[0].text
bot_name_without_at = self.TEST_BOT_NAME[1:]
does_bot_have_1_karma = bool(re.search(f"{bot_name_without_at}: 1", bot_response))
self.assertTrue(does_bot_have_1_karma)
self.client.send_message(chat_id, "-1", reply_to_message_id=message_id)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
does_bot_have_zero_karma = bool(re.search(f"{bot_name_without_at}: 0", show_karma_response.messages[0].text))
self.assertTrue(does_bot_have_zero_karma, '-1 on same message should override last vote')
@unittest.skip("TEMPORARY")
def test_upvote(self):
"""Tests that upvoting a message results in +1 karma"""
self.client.send_command_await(CLEAR_CHAT_COMMAND)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
message = show_karma_response.messages[0]
chat_id = message.chat.id
message_id = message.message_id
self.client.send_message(chat_id, "+1", reply_to_message_id=message_id)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
bot_name_without_at = self.TEST_BOT_NAME[1:]
bot_response = show_karma_response.messages[0].text
does_bot_have_1_karma = re.search(f"{bot_name_without_at}: 1", bot_response)
self.assertTrue(does_bot_have_1_karma, "Bot should have 1 karma after 1 plus 1")
@unittest.skip("dont care that this works for now")
def test_downvote(self):
"""Tests that downvoting a message results in -1 karma"""
self.client.send_command_await(CLEAR_CHAT_COMMAND)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
message = show_karma_response.messages[0]
chat_id = message.chat.id
message_id = message.message_id
self.client.send_message(chat_id, "-1", reply_to_message_id=message_id)
show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
self.assertEqual(len(show_karma_response.messages), 1)
bot_name_without_at = self.TEST_BOT_NAME[1:]
bot_response = show_karma_response.messages[0].text
        does_bot_have_minus_1_karma = re.search(f"{bot_name_without_at}: -1", bot_response)
        self.assertTrue(does_bot_have_minus_1_karma, "Bot should have -1 karma after a single -1")
#(chat_id, "+1", send_to_message_id=message_id)
#print(f"Message id: {message.message_id}")
    #TODO: how to send a message as a response to another message. Could be done directly with pyrogram
#print(clear_chat_response)
#print(dir(clear_chat_response))
#clear chat
#run showkarma (capture message id)
#make sure has empty result
#plus 1 showkarma message
#make sure there is a number 1 in karma
#TODO: test keyboard implementation
#TODO: test non existent use cases (userstats where userid doesn't exist, etc)
    #TODO: host multiple bots with swarm and split integration tests among them
@unittest.skip("TEMPORARY")
def test_userinfo(self):
# self.client.send_command_await(CLEAR_CHAT_COMMAND)
# show_karma_response = self.client.send_command_await(SHOW_KARMA_COMMAND, num_expected=1)
# self.assertEqual(len(show_karma_response.messages), 1)
# message = show_karma_response.messages[0]
# chat_id = message.chat.id
# message_id = message.message_id
# self.client.send_message(chat_id, "+1", reply_to_message_id=message_id)
command = f"{USER_INFO_COMMAND} {self.TEST_BOT_NAME[1:]}"
user_info_response = self.client.send_command_await(command, num_expected=1)
self.assertEqual(len(user_info_response.messages), 1)
@unittest.skip("TEMPORARY")
def test_chatinfo(self):
response = self.client.send_command_await(CHAT_INFO_COMMAND, num_expected=1)
self.assertEqual(len(response.messages), 1)
#TODO: check that number of users with karma is 2
#TODO: Check total reply count
@unittest.skip("test not yet implemented")
def test_history_graph(self):
#TODO: run command, check that a file asset is sent back
#TODO: add another test case for showing a file not being sent when there is no data in the chat
pass
def test_check_chat_karmas(self):
response = self.client.send_command_await(SHOW_KARMA_KEYBOARD_COMMAND, num_expected=1)
keyboards = response.inline_keyboards
# TODO: how to verify that there is history in another chat?
#TODO: perhaps there should be a hidden flag to include chat with the bot
self.assertTrue(keyboards is not None)
self.assertTrue(len(keyboards) > 0)
keyboard = keyboards[0]
karma_result = keyboard.press_button_await(pattern=r'.*', num_expected=1)
bot_name_without_at = self.TEST_BOT_NAME[1:]
#print(karma_result)
did_bot_provide_karma = re.search(f"{bot_name_without_at}", str(karma_result))
self.assertTrue(did_bot_provide_karma)
if __name__ == "__main__":
    #TODO: separate this into test suites for the various features
unittest.main()
|
StarcoderdataPython
|
12850594
|
import autokeras as ak
from tensorflow.python.util import nest
from tf2cv.models.resnet import ResNet
LAYER_OPTIONS = [[1, 1, 1, 1], [2, 1, 1, 1], [2, 2, 1, 1], [2, 2, 2, 1], [2, 2, 2, 2], [3, 3, 3, 3],
[3, 4, 6, 3]]
class CustomResnetBlock(ak.Block):
def __init__(self, in_size=(224, 224), in_channels=3, layer_options=LAYER_OPTIONS, **kwargs):
super().__init__(**kwargs)
self.in_channels = in_channels
self.in_size = in_size
self.layers_options = layer_options
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
# Get HP Params for network
bottleneck = hp.Boolean('hp_bottleneck', default=False)
layers_option_idx = list(range(len(self.layers_options)))
layers_sel = hp.Choice('idx_layers', values=layers_option_idx)
layers = self.layers_options[layers_sel]
if self.in_size[0] < 100:
init_block_channels = 16
channels_per_layers = [16, 32, 64]
layers = layers[:3]
else:
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
width_scale = hp.Float('width_scale', min_value=0.5, max_value=1.5, step=0.1)
if width_scale != 1.0:
# it should not change the last block of last layer
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
# Create layers
net = ResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=True,
in_channels=self.in_channels,
in_size=self.in_size,
use_with_ak_classification=True).features
output_node = net(input_node)
return output_node
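if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, not part of the original module):
    # plug the custom block into an AutoKeras search space. The dummy data,
    # class count and trial budget are placeholders, not project values.
    import numpy as np
    x = np.random.rand(20, 32, 32, 3).astype("float32")
    y = np.random.randint(0, 10, size=(20,))
    input_node = ak.ImageInput()
    output_node = CustomResnetBlock(in_size=(32, 32), in_channels=3)(input_node)
    output_node = ak.ClassificationHead()(output_node)
    auto_model = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=1)
    auto_model.fit(x, y, epochs=1)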
|
StarcoderdataPython
|
3312002
|
from os.path import exists, dirname
from os import makedirs, environ
from kivy.factory import Factory as F
from kivy.properties import StringProperty
from kivy.resources import resource_add_path
from ncis_inspector.controller import discover_classes
try:
from kaki.app import App
IS_KAKI_APP = True
except ImportError:
IS_KAKI_APP = False
if environ.get("DEBUG"):
print("Kaki is missing, use Kivy app, but reloading will be missed")
class Application(App):
CLASSES = {
"InspectorApplicationRoot": "ncis_inspector.app",
"KivyInspectorView": "ncis_inspector.views.view_kivy"
}
    # CLASSES = dict(discover_classes())  # Doesn't work because of self-dependencies
AUTORELOADER_PATHS = [
('ncis_inspector', {'recursive': True}),
]
name = StringProperty("NCIS-Dash")
if IS_KAKI_APP:
def build_app(self):
self.load_config()
return F.InspectorApplicationRoot()
else:
def build(self):
self.load_config()
return F.InspectorApplicationRoot()
def load_config(self):
resource_add_path(dirname(__file__))
config = super(Application, self).load_config()
if not config.filename:
config.filename = self.get_application_config()
def build_config(self, config):
config.setdefaults('general', {
'version': '0'
})
def get_application_config(self):
if exists('{}.ini'.format(self.name)):
path = '{}.ini'.format(self.name)
else:
path = '{}/%(appname)s.ini'.format(
self.user_data_dir
)
cfg = super(Application, self).get_application_config(path)
d = dirname(cfg)
if d and not exists(d):
makedirs(d)
return cfg
def main():
Application().run()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3491114
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List  # needed for the List[List[int]] return annotation below
class Solution:
def traverse(self, root, level):
if(root):
self.max_level = max(level, self.max_level)
if(self.max_level == len(self.res)):
self.res.append([])
self.res[level].append(root.val)
self.traverse(root.left, level+1)
self.traverse(root.right, level+1)
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
self.res = []
self.max_level = 0
self.traverse(root, 0)
self.res.reverse()
return self.res
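if __name__ == "__main__":
    # Hedged demo (LeetCode normally supplies TreeNode; this stand-in is only
    # here so the snippet can run on its own).
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().levelOrderBottom(root))  # [[15, 7], [9, 20], [3]]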
|
StarcoderdataPython
|
156267
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
# external packages
# local imports
from base.indiClass import IndiClass
class DomeIndi(IndiClass):
"""
"""
__all__ = ['DomeIndi']
def __init__(self, app=None, signals=None, data=None):
self.signals = signals
super().__init__(app=app, data=data, threadPool=app.threadPool)
self.data = data
self.lastAzimuth = None
self.app.update1s.connect(self.updateStatus)
def setUpdateConfig(self, deviceName):
"""
        setUpdateConfig corrects the update rate of dome devices to get a
        defined setting regardless of what is configured on the server side.
:param deviceName:
:return: success
"""
if deviceName != self.deviceName:
return False
if self.device is None:
return False
update = self.device.getNumber('POLLING_PERIOD')
if 'PERIOD_MS' not in update:
return False
if update.get('PERIOD_MS', 0) == self.UPDATE_RATE:
return True
update['PERIOD_MS'] = self.UPDATE_RATE
suc = self.client.sendNewNumber(deviceName=deviceName,
propertyName='POLLING_PERIOD',
elements=update,
)
return suc
def updateStatus(self):
"""
        updateStatus emits the current azimuth status periodically, so that a
        window opened later (whose signals are therefore connected late) still
        receives updates, since INDI does not repeat any signal on its own.
:return: true for test purpose
"""
if not self.client.connected:
return False
azimuth = self.data.get('ABS_DOME_POSITION.DOME_ABSOLUTE_POSITION', 0)
self.signals.azimuth.emit(azimuth)
return True
def updateNumber(self, deviceName, propertyName):
"""
        updateNumber is called whenever a new number is received by the client.
        It runs through the device list and writes the number data to the
        corresponding locations.
:param deviceName:
:param propertyName:
:return:
"""
if not super().updateNumber(deviceName, propertyName):
return False
for element, value in self.device.getNumber(propertyName).items():
if element == 'DOME_ABSOLUTE_POSITION':
azimuth = self.data.get('ABS_DOME_POSITION.DOME_ABSOLUTE_POSITION',
0)
self.signals.azimuth.emit(azimuth)
slewing = self.device.ABS_DOME_POSITION['state'] == 'Busy'
self.data['Slewing'] = slewing
if element == 'SHUTTER_OPEN':
moving = self.device.DOME_SHUTTER['state'] == 'Busy'
if moving:
self.data['Shutter.Status'] = 'Moving'
else:
self.data['Shutter.Status'] = '-'
return True
def slewToAltAz(self, altitude=0, azimuth=0):
"""
:param altitude:
:param azimuth:
:return: success
"""
if self.device is None:
return False
if self.deviceName is None or not self.deviceName:
return False
position = self.device.getNumber('ABS_DOME_POSITION')
if 'DOME_ABSOLUTE_POSITION' not in position:
return False
position['DOME_ABSOLUTE_POSITION'] = azimuth
suc = self.client.sendNewNumber(deviceName=self.deviceName,
propertyName='ABS_DOME_POSITION',
elements=position,
)
return suc
def openShutter(self):
"""
:return: success
"""
if self.device is None:
return False
if self.deviceName is None or not self.deviceName:
return False
position = self.device.getSwitch('DOME_SHUTTER')
if 'SHUTTER_OPEN' not in position:
return False
position['SHUTTER_OPEN'] = 'On'
position['SHUTTER_CLOSE'] = 'Off'
suc = self.client.sendNewSwitch(deviceName=self.deviceName,
propertyName='DOME_SHUTTER',
elements=position,
)
return suc
def closeShutter(self):
"""
:return: success
"""
if self.device is None:
return False
if self.deviceName is None or not self.deviceName:
return False
position = self.device.getSwitch('DOME_SHUTTER')
if 'SHUTTER_CLOSE' not in position:
return False
position['SHUTTER_OPEN'] = 'Off'
position['SHUTTER_CLOSE'] = 'On'
suc = self.client.sendNewSwitch(deviceName=self.deviceName,
propertyName='DOME_SHUTTER',
elements=position,
)
return suc
def abortSlew(self):
"""
:return: success
"""
if self.device is None:
return False
if self.deviceName is None or not self.deviceName:
return False
position = self.device.getSwitch('DOME_ABORT_MOTION')
if 'ABORT' not in position:
return False
position['ABORT'] = 'On'
suc = self.client.sendNewSwitch(deviceName=self.deviceName,
propertyName='DOME_ABORT_MOTION',
elements=position,
)
return suc
|
StarcoderdataPython
|
6411317
|
<reponame>jcazallasc/lana-python-challenge<filename>app/checkout_backend/uses_cases/offers/multi_buy_offer.py
from .base_offer import BaseOffer
class MultiBuyOffer(BaseOffer):
def get_subtotal_amount(
self,
product_quantity: int,
product_price: int,
) -> int:
num_free_products = product_quantity // self.offer.quantity
return (product_quantity - num_free_products) * product_price
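# Worked example (illustrative, assuming a "3-for-2"-style offer with
# offer.quantity == 3): 7 units priced at 150 give 7 // 3 == 2 free units,
# so the subtotal is (7 - 2) * 150 == 750.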
|
StarcoderdataPython
|
6648169
|
<filename>src/dir_handler.py
import pathlib
def get_folder():
folder = pathlib.Path.home() / '.intellijournal'
folder.mkdir(exist_ok=True)
return folder
def get_journal():
journal = get_folder() / 'journal.db'
journal.touch()
return journal
def get_config():
config = get_folder() / 'config'
config.touch()
return config
|
StarcoderdataPython
|
399872
|
<filename>scx11scanner/utils.py
'''
Created on 1.11.2016
@author: <NAME>
'''
def kbinterrupt_decorate(func):
'''
Decorator.
Adds KeyboardInterrupt handling to ControllerBase methods.
'''
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
this = args[0]
this._abort()
raise
return func_wrapper
def wait_decorate(func):
'''
Decorator.
Adds waiting
'''
def func_wrapper(*args, **kwargs):
this = args[0]
wait = kwargs['wait']
func(*args, **kwargs)
if wait:
this.x.wait_to_finish()
return func_wrapper
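if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): minimal stand-ins
    # showing the attributes the decorators expect (`self.x.wait_to_finish`,
    # `self._abort`) and that `wait` must be passed as a keyword argument.
    class _DemoAxis:
        def wait_to_finish(self):
            print('waiting for axis to finish')
    class _DemoController:
        def __init__(self):
            self.x = _DemoAxis()
        def _abort(self):
            print('aborting')
        @kbinterrupt_decorate
        @wait_decorate
        def move(self, position, wait=False):
            print('moving to', position)
    _DemoController().move(10, wait=True)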
|
StarcoderdataPython
|
11246165
|
import os
import logging
from logging.handlers import RotatingFileHandler
from celery import Celery
from flask import Flask
from flask_environments import Environments
from flask_mongoengine import MongoEngine
from celery.signals import before_task_publish, task_prerun, task_success, task_failure
import mongoengine
from datetime import datetime
import pytz
from config import celery_config
from db.mongo_models import task_monitor
app = Flask(__name__)
env = Environments(app)
env.from_object('config.flask_config')
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) # os.path.abspath(os.path.dirname(__file__))
file_handler = RotatingFileHandler(basedir+"/logs/logger_flask.log", encoding='utf-8')
formatter = logging.Formatter("%(asctime)s\t%(levelname)s\t%(message)s")
file_handler.setFormatter(formatter)
# Initialize MongoDB
monogo_conn = MongoEngine()
monogo_conn.init_app(app)
flask_celery = Celery(app.name, broker = celery_config.CELERY_BROKER_URL)
flask_celery.config_from_object('config.celery_config')
# Import the routes
import apps.flask_route
@before_task_publish.connect()
def task_before_sent_handler(sender=None, headers=None, body=None, **kwargs):
# information about task are located in headers for task messages
# using the task protocol version 2.
mongoengine.connect(**celery_config.mongoengine_SETTINGS)
task_name = sender
args = headers.get('argsrepr')
task_id = headers.get('id')
task_monitor_ob = task_monitor()
task_monitor_ob.task_id = task_id
task_monitor_ob.task_name = task_name
task_monitor_ob.before_sent_args = args
now = datetime.now(tz = pytz.timezone('Asia/Shanghai'))
task_monitor_ob.create_time = now
task_monitor_ob.update_time = now
task_monitor_ob.celery_stask_status = 0
task_monitor_ob.save()
@task_prerun.connect()
def task_prerun_handler(task_id = None, args = None, **kwargs):
mongoengine.connect(**celery_config.mongoengine_SETTINGS)
#information about task are located in headers for task messages
# using the task protocol version 2.
print("task_prerun_handler:" + str(task_id))
task_monitor_ob = task_monitor.objects(task_id= task_id).first()
task_monitor_ob.task_prerun_args = args
task_monitor_ob.celery_stask_status = 1
task_monitor_ob.update_time = datetime.now(tz = pytz.timezone('Asia/Shanghai'))
task_monitor_ob.save()
@task_success.connect()
def task_success_handler(sender=None, headers=None, body=None, **kwargs):
# information about task are located in headers for task messages
# using the task protocol version 2.
mongoengine.connect(**celery_config.mongoengine_SETTINGS)
task_id = sender.request.get('id')
print("task_success_handler:" + str(task_id))
task_monitor_ob = task_monitor.objects(task_id= task_id).first()
task_monitor_ob.celery_stask_status = 5
task_monitor_ob.update_time = datetime.now(tz = pytz.timezone('Asia/Shanghai'))
task_monitor_ob.save()
@task_failure.connect()
def task_failure_handler(sender=None, headers=None, body=None, **kwargs):
# information about task are located in headers for task messages
# using the task protocol version 2.
mongoengine.connect(**celery_config.mongoengine_SETTINGS)
task_id = sender.request.get('id')
task_monitor_ob = task_monitor.objects(task_id= task_id).first()
task_monitor_ob.celery_stask_status = 6
task_monitor_ob.update_time = datetime.now(tz = pytz.timezone('Asia/Shanghai'))
task_monitor_ob.save()
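# Hedged usage sketch (not part of the original module): any task defined on
# `flask_celery` is tracked by the signal handlers above. The task below is a
# placeholder; sending it (e.g. demo_ping.delay()) fires before_task_publish,
# then task_prerun on the worker, and finally task_success or task_failure.
@flask_celery.task
def demo_ping():
    return 'pong'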
|
StarcoderdataPython
|
1933590
|
<filename>cogs/economy.py
import discord
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from collections import namedtuple, defaultdict
from datetime import datetime
from random import randint
from copy import deepcopy
from .utils import checks
from cogs.utils.chat_formatting import pagify, box
from __main__ import send_cmd_help
import os
import time
import logging
default_settings = {"PAYDAY_TIME": 300, "PAYDAY_CREDITS": 120,
"SLOT_MIN": 5, "SLOT_MAX": 100, "SLOT_TIME": 0,
"REGISTER_CREDITS": 0}
slot_payouts = """Slot machine payouts:
:two: :two: :six: Bet * 5000
:four_leaf_clover: :four_leaf_clover: :four_leaf_clover: +1000
:cherries: :cherries: :cherries: +800
:two: :six: Bet * 4
:cherries: :cherries: Bet * 3
Three symbols: +500
Two symbols: Bet * 2"""
class BankError(Exception):
pass
class AccountAlreadyExists(BankError):
pass
class NoAccount(BankError):
pass
class InsufficientBalance(BankError):
pass
class NegativeValue(BankError):
pass
class SameSenderAndReceiver(BankError):
pass
class Bank:
def __init__(self, bot, file_path):
self.accounts = dataIO.load_json(file_path)
self.bot = bot
def create_account(self, user, *, initial_balance=0):
server = user.server
if not self.account_exists(user):
if server.id not in self.accounts:
self.accounts[server.id] = {}
if user.id in self.accounts: # Legacy account
balance = self.accounts[user.id]["balance"]
else:
balance = initial_balance
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
account = {"name": user.name,
"balance": balance,
"created_at": timestamp
}
self.accounts[server.id][user.id] = account
self._save_bank()
return self.get_account(user)
else:
raise AccountAlreadyExists()
def account_exists(self, user):
try:
self._get_account(user)
except NoAccount:
return False
return True
def withdraw_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
if account["balance"] >= amount:
account["balance"] -= amount
self.accounts[server.id][user.id] = account
self._save_bank()
else:
raise InsufficientBalance()
def deposit_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
account["balance"] += amount
self.accounts[server.id][user.id] = account
self._save_bank()
def set_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
account["balance"] = amount
self.accounts[server.id][user.id] = account
self._save_bank()
def transfer_credits(self, sender, receiver, amount):
if amount < 0:
raise NegativeValue()
if sender is receiver:
raise SameSenderAndReceiver()
if self.account_exists(sender) and self.account_exists(receiver):
sender_acc = self._get_account(sender)
if sender_acc["balance"] < amount:
raise InsufficientBalance()
self.withdraw_credits(sender, amount)
self.deposit_credits(receiver, amount)
else:
raise NoAccount()
def can_spend(self, user, amount):
account = self._get_account(user)
if account["balance"] >= amount:
return True
else:
return False
def wipe_bank(self, server):
self.accounts[server.id] = {}
self._save_bank()
def get_server_accounts(self, server):
if server.id in self.accounts:
raw_server_accounts = deepcopy(self.accounts[server.id])
accounts = []
for k, v in raw_server_accounts.items():
v["id"] = k
v["server"] = server
acc = self._create_account_obj(v)
accounts.append(acc)
return accounts
else:
return []
def get_all_accounts(self):
accounts = []
for server_id, v in self.accounts.items():
server = self.bot.get_server(server_id)
if server is None:
# Servers that have since been left will be ignored
# Same for users_id from the old bank format
continue
raw_server_accounts = deepcopy(self.accounts[server.id])
for k, v in raw_server_accounts.items():
v["id"] = k
v["server"] = server
acc = self._create_account_obj(v)
accounts.append(acc)
return accounts
def get_balance(self, user):
account = self._get_account(user)
return account["balance"]
def get_account(self, user):
acc = self._get_account(user)
acc["id"] = user.id
acc["server"] = user.server
return self._create_account_obj(acc)
def _create_account_obj(self, account):
account["member"] = account["server"].get_member(account["id"])
account["created_at"] = datetime.strptime(account["created_at"],
"%Y-%m-%d %H:%M:%S")
Account = namedtuple("Account", "id name balance "
"created_at server member")
return Account(**account)
def _save_bank(self):
dataIO.save_json("data/economy/bank.json", self.accounts)
def _get_account(self, user):
server = user.server
try:
return deepcopy(self.accounts[server.id][user.id])
except KeyError:
raise NoAccount()
class Economy:
"""Economy
Get rich and have fun with imaginary currency!"""
def __init__(self, bot):
global default_settings
self.bot = bot
self.bank = Bank(bot, "data/economy/bank.json")
self.file_path = "data/economy/settings.json"
self.settings = dataIO.load_json(self.file_path)
if "PAYDAY_TIME" in self.settings: # old format
default_settings = self.settings
self.settings = {}
self.settings = defaultdict(lambda: default_settings, self.settings)
self.payday_register = defaultdict(dict)
self.slot_register = defaultdict(dict)
@commands.group(name="bank", pass_context=True)
async def _bank(self, ctx):
"""Bank operations"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@_bank.command(pass_context=True, no_pm=True)
async def register(self, ctx):
"""Registers an account at the Twentysix bank"""
user = ctx.message.author
credits = 0
if ctx.message.server.id in self.settings:
credits = self.settings[ctx.message.server.id].get("REGISTER_CREDITS", 0)
try:
            account = self.bank.create_account(user, initial_balance=credits)
await self.bot.say("{} Account opened. Current balance: {}".format(
user.mention, account.balance))
except AccountAlreadyExists:
await self.bot.say("{} You already have an account at the"
" Twentysix bank.".format(user.mention))
@_bank.command(pass_context=True)
async def balance(self, ctx, user: discord.Member=None):
"""Shows balance of user.
Defaults to yours."""
if not user:
user = ctx.message.author
try:
await self.bot.say("{} Your balance is: {}".format(
user.mention, self.bank.get_balance(user)))
except NoAccount:
await self.bot.say("{} You don't have an account at the"
" Twentysix bank. Type `{}bank register`"
" to open one.".format(user.mention,
ctx.prefix))
else:
try:
await self.bot.say("{}'s balance is {}".format(
user.name, self.bank.get_balance(user)))
except NoAccount:
await self.bot.say("That user has no bank account.")
@_bank.command(pass_context=True)
async def transfer(self, ctx, user: discord.Member, sum: int):
"""Transfer credits to other users"""
author = ctx.message.author
try:
self.bank.transfer_credits(author, user, sum)
logger.info("{}({}) transferred {} credits to {}({})".format(
author.name, author.id, sum, user.name, user.id))
await self.bot.say("{} credits have been transferred to {}'s"
" account.".format(sum, user.name))
except NegativeValue:
await self.bot.say("You need to transfer at least 1 credit.")
except SameSenderAndReceiver:
await self.bot.say("You can't transfer credits to yourself.")
except InsufficientBalance:
await self.bot.say("You don't have that sum in your bank account.")
except NoAccount:
await self.bot.say("That user has no bank account.")
@_bank.command(name="set", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _set(self, ctx, user: discord.Member, sum: int):
"""Sets credits of user's bank account
Admin/owner restricted."""
author = ctx.message.author
try:
self.bank.set_credits(user, sum)
logger.info("{}({}) set {} credits to {} ({})".format(
author.name, author.id, str(sum), user.name, user.id))
await self.bot.say("{}'s credits have been set to {}".format(
user.name, str(sum)))
except NoAccount:
await self.bot.say("User has no bank account.")
@commands.command(pass_context=True, no_pm=True)
async def payday(self, ctx): # TODO
"""Get some free credits"""
author = ctx.message.author
server = author.server
id = author.id
if self.bank.account_exists(author):
if id in self.payday_register[server.id]:
seconds = abs(self.payday_register[server.id][
id] - int(time.perf_counter()))
if seconds >= self.settings[server.id]["PAYDAY_TIME"]:
self.bank.deposit_credits(author, self.settings[
server.id]["PAYDAY_CREDITS"])
self.payday_register[server.id][
id] = int(time.perf_counter())
await self.bot.say(
"{} Here, take some credits. Enjoy! (+{}"
" credits!)".format(
author.mention,
str(self.settings[server.id]["PAYDAY_CREDITS"])))
else:
dtime = self.display_time(
self.settings[server.id]["PAYDAY_TIME"] - seconds)
await self.bot.say(
"{} Too soon. For your next payday you have to"
" wait {}.".format(author.mention, dtime))
else:
self.payday_register[server.id][id] = int(time.perf_counter())
self.bank.deposit_credits(author, self.settings[
server.id]["PAYDAY_CREDITS"])
await self.bot.say(
"{} Here, take some credits. Enjoy! (+{} credits!)".format(
author.mention,
str(self.settings[server.id]["PAYDAY_CREDITS"])))
else:
await self.bot.say("{} You need an account to receive credits."
" Type `{}bank register` to open one.".format(
author.mention, ctx.prefix))
@commands.group(pass_context=True)
async def leaderboard(self, ctx):
"""Server / global leaderboard
Defaults to server"""
if ctx.invoked_subcommand is None:
await ctx.invoke(self._server_leaderboard)
@leaderboard.command(name="server", pass_context=True)
async def _server_leaderboard(self, ctx, top: int=10):
"""Prints out the server's leaderboard
Defaults to top 10"""
# Originally coded by Airenkun - edited by irdumb
server = ctx.message.server
if top < 1:
top = 10
bank_sorted = sorted(self.bank.get_server_accounts(server),
key=lambda x: x.balance, reverse=True)
if len(bank_sorted) < top:
top = len(bank_sorted)
topten = bank_sorted[:top]
highscore = ""
place = 1
for acc in topten:
highscore += str(place).ljust(len(str(top)) + 1)
highscore += (acc.name + " ").ljust(23 - len(str(acc.balance)))
highscore += str(acc.balance) + "\n"
place += 1
if highscore != "":
for page in pagify(highscore, shorten_by=12):
await self.bot.say(box(page, lang="py"))
else:
await self.bot.say("There are no accounts in the bank.")
@leaderboard.command(name="global")
async def _global_leaderboard(self, top: int=10):
"""Prints out the global leaderboard
Defaults to top 10"""
if top < 1:
top = 10
bank_sorted = sorted(self.bank.get_all_accounts(),
key=lambda x: x.balance, reverse=True)
unique_accounts = []
for acc in bank_sorted:
if not self.already_in_list(unique_accounts, acc):
unique_accounts.append(acc)
if len(unique_accounts) < top:
top = len(unique_accounts)
topten = unique_accounts[:top]
highscore = ""
place = 1
for acc in topten:
highscore += str(place).ljust(len(str(top)) + 1)
highscore += ("{} |{}| ".format(acc.name, acc.server.name)
).ljust(23 - len(str(acc.balance)))
highscore += str(acc.balance) + "\n"
place += 1
if highscore != "":
for page in pagify(highscore, shorten_by=12):
await self.bot.say(box(page, lang="py"))
else:
await self.bot.say("There are no accounts in the bank.")
def already_in_list(self, accounts, user):
for acc in accounts:
if user.id == acc.id:
return True
return False
@commands.command()
async def payouts(self):
"""Shows slot machine payouts"""
await self.bot.whisper(slot_payouts)
@commands.command(pass_context=True, no_pm=True)
async def slot(self, ctx, bid: int):
"""Play the slot machine"""
author = ctx.message.author
server = author.server
if not self.bank.account_exists(author):
await self.bot.say("{} You need an account to use the slot machine. Type `{}bank register` to open one.".format(author.mention, ctx.prefix))
return
if self.bank.can_spend(author, bid):
if bid >= self.settings[server.id]["SLOT_MIN"] and bid <= self.settings[server.id]["SLOT_MAX"]:
if author.id in self.slot_register:
if abs(self.slot_register[author.id] - int(time.perf_counter())) >= self.settings[server.id]["SLOT_TIME"]:
self.slot_register[author.id] = int(
time.perf_counter())
await self.slot_machine(ctx.message, bid)
else:
await self.bot.say("Slot machine is still cooling off! Wait {} seconds between each pull".format(self.settings[server.id]["SLOT_TIME"]))
else:
self.slot_register[author.id] = int(time.perf_counter())
await self.slot_machine(ctx.message, bid)
else:
await self.bot.say("{0} Bid must be between {1} and {2}.".format(author.mention, self.settings[server.id]["SLOT_MIN"], self.settings[server.id]["SLOT_MAX"]))
else:
await self.bot.say("{0} You need an account with enough funds to play the slot machine.".format(author.mention))
async def slot_machine(self, message, bid):
reel_pattern = [":cherries:", ":cookie:", ":two:", ":four_leaf_clover:",
":cyclone:", ":sunflower:", ":six:", ":mushroom:", ":heart:", ":snowflake:"]
# padding prevents index errors
padding_before = [":mushroom:", ":heart:", ":snowflake:"]
padding_after = [":cherries:", ":cookie:", ":two:"]
reel = padding_before + reel_pattern + padding_after
reels = []
for i in range(0, 3):
n = randint(3, 12)
reels.append([reel[n - 1], reel[n], reel[n + 1]])
line = [reels[0][1], reels[1][1], reels[2][1]]
display_reels = "~~\n~~ " + \
reels[0][0] + " " + reels[1][0] + " " + reels[2][0] + "\n"
display_reels += ">" + reels[0][1] + " " + \
reels[1][1] + " " + reels[2][1] + "\n"
display_reels += " " + reels[0][2] + " " + \
reels[1][2] + " " + reels[2][2] + "\n"
if line[0] == ":two:" and line[1] == ":two:" and line[2] == ":six:":
bid = bid * 5000
slotMsg = "{}{} 226! Your bet is multiplied * 5000! {}! ".format(
display_reels, message.author.mention, str(bid))
elif line[0] == ":four_leaf_clover:" and line[1] == ":four_leaf_clover:" and line[2] == ":four_leaf_clover:":
bid += 1000
slotMsg = "{}{} Three FLC! +1000! ".format(
display_reels, message.author.mention)
elif line[0] == ":cherries:" and line[1] == ":cherries:" and line[2] == ":cherries:":
bid += 800
slotMsg = "{}{} Three cherries! +800! ".format(
display_reels, message.author.mention)
elif line[0] == line[1] == line[2]:
bid += 500
slotMsg = "{}{} Three symbols! +500! ".format(
display_reels, message.author.mention)
elif line[0] == ":two:" and line[1] == ":six:" or line[1] == ":two:" and line[2] == ":six:":
bid = bid * 4
slotMsg = "{}{} 26! Your bet is multiplied * 4! {}! ".format(
display_reels, message.author.mention, str(bid))
elif line[0] == ":cherries:" and line[1] == ":cherries:" or line[1] == ":cherries:" and line[2] == ":cherries:":
bid = bid * 3
slotMsg = "{}{} Two cherries! Your bet is multiplied * 3! {}! ".format(
display_reels, message.author.mention, str(bid))
elif line[0] == line[1] or line[1] == line[2]:
bid = bid * 2
slotMsg = "{}{} Two symbols! Your bet is multiplied * 2! {}! ".format(
display_reels, message.author.mention, str(bid))
else:
slotMsg = "{}{} Nothing! Lost bet. ".format(
display_reels, message.author.mention)
self.bank.withdraw_credits(message.author, bid)
slotMsg += "\n" + \
" Credits left: {}".format(
self.bank.get_balance(message.author))
await self.bot.send_message(message.channel, slotMsg)
return True
self.bank.deposit_credits(message.author, bid)
slotMsg += "\n" + \
" Current credits: {}".format(
self.bank.get_balance(message.author))
await self.bot.send_message(message.channel, slotMsg)
@commands.group(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def economyset(self, ctx):
"""Changes economy module settings"""
server = ctx.message.server
settings = self.settings[server.id]
if ctx.invoked_subcommand is None:
msg = "```"
for k, v in settings.items():
msg += "{}: {}\n".format(k, v)
msg += "```"
await send_cmd_help(ctx)
await self.bot.say(msg)
@economyset.command(pass_context=True)
async def slotmin(self, ctx, bid: int):
"""Minimum slot machine bid"""
server = ctx.message.server
self.settings[server.id]["SLOT_MIN"] = bid
await self.bot.say("Minimum bid is now " + str(bid) + " credits.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def slotmax(self, ctx, bid: int):
"""Maximum slot machine bid"""
server = ctx.message.server
self.settings[server.id]["SLOT_MAX"] = bid
await self.bot.say("Maximum bid is now " + str(bid) + " credits.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def slottime(self, ctx, seconds: int):
"""Seconds between each slots use"""
server = ctx.message.server
self.settings[server.id]["SLOT_TIME"] = seconds
await self.bot.say("Cooldown is now " + str(seconds) + " seconds.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def paydaytime(self, ctx, seconds: int):
"""Seconds between each payday"""
server = ctx.message.server
self.settings[server.id]["PAYDAY_TIME"] = seconds
await self.bot.say("Value modified. At least " + str(seconds) + " seconds must pass between each payday.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def paydaycredits(self, ctx, credits: int):
"""Credits earned each payday"""
server = ctx.message.server
self.settings[server.id]["PAYDAY_CREDITS"] = credits
await self.bot.say("Every payday will now give " + str(credits) + " credits.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def registercredits(self, ctx, credits: int):
"""Credits given on registering an account"""
server = ctx.message.server
if credits < 0:
credits = 0
self.settings[server.id]["REGISTER_CREDITS"] = credits
await self.bot.say("Registering an account will now give {} credits.".format(credits))
dataIO.save_json(self.file_path, self.settings)
# What would I ever do without stackoverflow?
def display_time(self, seconds, granularity=2):
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
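    # Worked example (illustrative): display_time(3700) returns '1 hour, 1 minute'
    # since 3700 s = 1*3600 + 1*60 + 40 and granularity=2 keeps the two largest units.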
def check_folders():
if not os.path.exists("data/economy"):
print("Creating data/economy folder...")
os.makedirs("data/economy")
def check_files():
f = "data/economy/settings.json"
if not dataIO.is_valid_json(f):
print("Creating default economy's settings.json...")
dataIO.save_json(f, {})
f = "data/economy/bank.json"
if not dataIO.is_valid_json(f):
print("Creating empty bank.json...")
dataIO.save_json(f, {})
def setup(bot):
global logger
check_folders()
check_files()
logger = logging.getLogger("red.economy")
if logger.level == 0:
# Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(
filename='data/economy/economy.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter(
'%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
bot.add_cog(Economy(bot))
|
StarcoderdataPython
|
9682486
|
<gh_stars>0
"""
"""
import openpyxl
xlsx = '../resource/excel/dimensions.xlsx'
wb = openpyxl.Workbook()
sheet = wb['Sheet']
sheet['A1'] = 'Tall row'
sheet['A2'] = 'Wide column'
sheet.row_dimensions[1].height = 70
sheet.column_dimensions['B'].width = 20
wb.save(xlsx)
print('Generate Success')
|
StarcoderdataPython
|
153697
|
from functools import singledispatch
from functools import update_wrapper
class singledispatchmethod:
"""Single-dispatch generic method descriptor.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError(f"{func!r} is not callable or a descriptor")
self.dispatcher = singledispatch(func)
self.func = func
def register(self, cls, method=None):
"""generic_method.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_method*.
"""
return self.dispatcher.register(cls, func=method)
def __get__(self, obj, cls=None):
def _method(*args, **kwargs):
method = self.dispatcher.dispatch(args[0].__class__)
return method.__get__(obj, cls)(*args, **kwargs)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method.register = self.register
update_wrapper(_method, self.func)
return _method
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
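if __name__ == "__main__":
    # Hedged usage sketch (not part of the backport itself): dispatch on the
    # type of the first positional argument after `self`.
    class Negator:
        @singledispatchmethod
        def neg(self, arg):
            raise NotImplementedError("cannot negate %r" % (arg,))
        @neg.register(int)
        def _(self, arg):
            return -arg
        @neg.register(bool)
        def _(self, arg):
            return not arg
    n = Negator()
    print(n.neg(5))     # -5
    print(n.neg(True))  # False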
|
StarcoderdataPython
|
3469263
|
from label_cnn import LabelCNN
from full_cnn import FullCNN
from keras import Model
from keras.layers import Activation, Concatenate, Add, Input, Cropping2D, Permute
from keras_helpers import BasicLayers, ResNetLayers, InceptionResNetLayer, RedNetLayers
class Inceptuous(LabelCNN):
def __init__(self, model_name='Inceptuous'):
super().__init__(model_name=model_name)
def build_model(self):
layers = BasicLayers(relu_version=self.RELU_VERSION)
input_tensor = Input(shape=self.INPUT_SHAPE)
x = input_tensor
x = layers.cbr(x, 32, kernel_size=(3, 3), strides=(2, 2), dilation_rate=(1, 1), padding='same')
# Half-size, 32 features
x1 = layers.cbr(x, 32, kernel_size=(3, 3), strides=(2, 2), dilation_rate=(1, 1), padding='same')
x2 = layers._max_pool(x, pool=(3, 3), strides=(2, 2), padding='same')
x = Concatenate(axis=1)([x1, x2])
x = layers._spatialdropout(x)
# Half-size, 64 features
x1 = layers.cbr(x, 64, kernel_size=(1, 1))
x1 = layers.cbr(x1, 64, kernel_size=(3, 3), strides=(2, 2))
x2 = layers.cbr(x, 64, kernel_size=(1, 1))
x2 = layers.cbr(x2, 64, kernel_size=(1, 5))
x2 = layers.cbr(x2, 64, kernel_size=(5, 1))
x2 = layers.cbr(x2, 64, kernel_size=(3, 3), strides=(2, 2))
x = Concatenate(axis=1)([x1, x2])
x = layers._spatialdropout(x)
# Half-size, 128 features
x1 = layers.cbr(x, 128, kernel_size=(3, 3), strides=(2, 2))
x2 = layers._max_pool(x, pool=(3, 3))
x = Concatenate(axis=1)([x1, x2])
x = layers._spatialdropout(x)
# Half-size, 256 features
x1 = layers.cbr(x, 256, kernel_size=(1, 1))
x1 = layers.cbr(x1, 256, kernel_size=(3, 3), strides=(2, 2))
x2 = layers.cbr(x, 256, kernel_size=(1, 1))
x2 = layers.cbr(x2, 256, kernel_size=(1, 3))
x2 = layers.cbr(x2, 256, kernel_size=(3, 1))
x2 = layers.cbr(x2, 256, kernel_size=(3, 3), strides=(2, 2))
x = Concatenate(axis=1)([x1, x2])
x = layers._spatialdropout(x)
# Half-size, 512 features
x = layers._flatten(x)
x = layers._dense(x, 2 * ((self.IMAGE_SIZE * self.IMAGE_SIZE) // (self.PATCH_SIZE * self.PATCH_SIZE)))
x = layers._act_fun(x)
x = layers._dense(x, self.NB_CLASSES) # Returns a logit
x = Activation('softmax')(x) # No logit anymore
self.model = Model(inputs=input_tensor, outputs=x)
class InceptionResNet(LabelCNN):
def __init__(self):
super().__init__(image_size=128, batch_size=32, model_name="Inception-ResNet-v2")
def build_model(self):
incres = InceptionResNetLayer(relu_version=self.RELU_VERSION, half_size=True)
# incres = InceptionResNetLayer()
input_tensor = Input(shape=self.INPUT_SHAPE)
x = input_tensor
x = incres.stem(x)
for i in range(5):
x = incres.block16(x)
x = incres.block7(x)
x = incres._act_fun(x)
for i in range(10):
x = incres.block17(x)
x = incres.block18(x)
x = incres._act_fun(x)
for i in range(5):
x = incres.block19(x)
#x = incres.cbr(x, 1024, (1, 1))
#x = incres.cbr(x, 256, (1, 1))
x = incres._flatten(x)
x = incres._dense(x, 6 * ((self.IMAGE_SIZE * self.IMAGE_SIZE) // (self.PATCH_SIZE * self.PATCH_SIZE)))
x = incres._dropout(x, 0.5)
x = incres._act_fun(x)
x = incres._dense(x, self.NB_CLASSES) # Returns a logit
x = Activation('softmax')(x) # No logit anymore
self.model = Model(inputs=input_tensor, outputs=x)
class ResNet(LabelCNN):
FULL_PREACTIVATION = False
def __init__(self, model_name="ResNet18", full_preactivation=False):
super().__init__(image_size=112, batch_size=16, relu_version='parametric', model_name=model_name)
self.FULL_PREACTIVATION = full_preactivation
def build_model(self):
resnet = ResNetLayers(relu_version=self.RELU_VERSION, full_preactivation=self.FULL_PREACTIVATION)
input_tensor = Input(shape=self.INPUT_SHAPE)
x = input_tensor
x = resnet.stem(x)
for i, layers in enumerate(resnet.REPETITIONS_SMALL):
for j in range(layers):
x = resnet.vanilla(x, resnet.FEATURES[i], (j == 0))
x = resnet._flatten(x)
x = resnet._dense(x, 2 * ((self.IMAGE_SIZE * self.IMAGE_SIZE) // (self.PATCH_SIZE * self.PATCH_SIZE)))
# x = resnet._dropout(x, 0.5)
x = resnet._act_fun(x)
x = resnet._dense(x, self.NB_CLASSES) # Returns a logit
x = Activation('softmax')(x) # No logit anymore
self.model = Model(inputs=input_tensor, outputs=x)
class RedNet(FullCNN):
FULL_PREACTIVATION = False
def __init__(self, model_name="RedNet50", full_preactivation=False):
super().__init__(image_size=608, batch_size=2, model_name=model_name)
self.FULL_PREACTIVATION = full_preactivation
def build_model(self):
rednet = RedNetLayers(relu_version=self.RELU_VERSION, full_preactivation=self.FULL_PREACTIVATION)
agent_list = []
agent_layers = []
input_tensor = Input(shape=self.INPUT_SHAPE)
x = input_tensor
x, a0 = rednet.stem(x)
agent_layers.append(rednet.agent_layer(a0, rednet.FEATURES[0]))
for i, layers in enumerate(rednet.REPETITIONS_NORMAL):
if i == 0:
for j in range(layers):
x = rednet.bottleneck(x, rednet.FEATURES[i], (j == 0))
else:
for j in range(layers):
x = rednet.bottleneck_down(x, rednet.FEATURES[i], (j == 0))
agent_list.append(x)
agent_layers = []
for i in range(len(agent_list)):
agent_layers.append(rednet.agent_layer(agent_list[i], rednet.FEATURES[i]))
x = agent_layers.pop()
for i, layers in enumerate(rednet.REPETITIONS_UP_NORMAL):
for j in range(layers):
x = rednet.residual_up(x, rednet.FEATURES_UP[i], (j == layers - 1))
if i + 1 != len(rednet.REPETITIONS_UP_NORMAL):
x = Add()([x, agent_layers.pop()])
x = rednet.last_block(x)
x = rednet._tcbr(x, self.NB_CLASSES, kernel_size=(2, 2), strides=(2, 2))
x = Permute((2, 3, 1))(x) # Permute to allow softmax to work
x = Activation('softmax')(x) # No logit anymore
x = Permute((3, 1, 2))(x) # Permute back data to have normal format
self.model = Model(inputs=input_tensor, outputs=x)
class SimpleNet(LabelCNN):
def __init__(self, model_name='SimpleNet'):
super().__init__(image_size=72, batch_size=64, relu_version='parametric', model_name=model_name)
def build_model(self):
layers = BasicLayers(relu_version=self.RELU_VERSION)
input_tensor = Input(shape=self.INPUT_SHAPE)
x = input_tensor
x = layers.cbr(x, 64, kernel_size=(3, 3))
for i in range(3):
x = layers.cbr(x, 128, kernel_size=(3, 3))
x = layers._max_pool(x, pool=(2, 2))
for i in range(2):
x = layers.cbr(x, 128, kernel_size=(3, 3))
x = layers.cbr(x, 128, kernel_size=(3, 3))
x = layers._max_pool(x, pool=(2, 2))
for i in range(2):
x = layers.cbr(x, 128, kernel_size=(3, 3))
x = layers._max_pool(x, pool=(2, 2))
x = layers.cbr(x, 128, kernel_size=(1, 1))
x = layers.cbr(x, 128, kernel_size=(1, 1))
x = layers._max_pool(x, pool=(2, 2))
x = layers.cbr(x, 128, kernel_size=(3, 3))
x = layers._max_pool(x, pool=(2, 2))
x = layers._flatten(x)
x = layers._dense(x, 6 * ((self.IMAGE_SIZE * self.IMAGE_SIZE) // (self.PATCH_SIZE * self.PATCH_SIZE)))
x = layers._dropout(x, 0.5)
x = layers._act_fun(x)
x = layers._dense(x, self.NB_CLASSES) # Returns a logit
x = Activation('softmax')(x) # No logit anymore
self.model = Model(inputs=input_tensor, outputs=x)
class EasyNet(LabelCNN):
def __init__(self, model_name='EasyNet'):
super().__init__(image_size=72, batch_size=64, relu_version='parametric', model_name=model_name)
def build_model(self):
layers = BasicLayers(relu_version=self.RELU_VERSION)
input_tensor = Input(shape=self.INPUT_SHAPE)
x = input_tensor
x = layers.cbr(x, 64, (5, 5))
x = layers._max_pool(x, pool=(2, 2))
x = layers._spatialdropout(x, 0.25)
x = layers.cbr(x, 128, (3, 3))
x = layers._max_pool(x, pool=(2, 2))
x = layers._spatialdropout(x, 0.25)
x = layers.cbr(x, 256, (3, 3))
x = layers._max_pool(x, pool=(2, 2))
x = layers._spatialdropout(x, 0.25)
x = layers.cbr(x, 512, (3, 3))
x = layers._max_pool(x, pool=(2, 2))
x = layers._spatialdropout(x, 0.25)
x = layers._flatten(x)
x = layers._dense(x, 6 * ((self.IMAGE_SIZE * self.IMAGE_SIZE) // (self.PATCH_SIZE * self.PATCH_SIZE)))
x = layers._dropout(x, 0.5)
x = layers._act_fun(x)
x = layers._dense(x, self.NB_CLASSES) # Returns a logit
x = Activation('softmax')(x) # No logit anymore
self.model = Model(inputs=input_tensor, outputs=x)
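# A minimal usage sketch (assumptions: LabelCNN's constructor is self-contained,
# stores the assembled Keras graph on `self.model`, and leaves calling
# build_model() to the caller; nothing here comes from outside the imports above).
if __name__ == '__main__':
    net = EasyNet()
    net.build_model()
    net.model.summary()  # print the layer-by-layer summary of the built graph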
|
StarcoderdataPython
|
6583829
|
from flask import Flask
from flask_restplus import Api, Resource, fields
app = Flask(__name__)
api = Api(app)
a_language = api.model('Language', {'language': fields.String('The language')})
languages = []
python = {'language':'Python'}
languages.append(python)
@api.route('/language')
class Language(Resource):
def get(self):
return languages
@api.expect(a_language)
def post(self):
languages.append(api.payload)
return {'results': "Language added"}, 201
if __name__ == '__main__':
app.run(debug=True)
|
StarcoderdataPython
|
238027
|
# Generated by Django 3.2.6 on 2021-08-16 06:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("syncing", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="signup",
name="attendance_pending",
field=models.CharField(default="", max_length=15),
),
migrations.AddField(
model_name="signup",
name="result_pending",
field=models.CharField(default="", max_length=15),
),
]
|
StarcoderdataPython
|
3346455
|
#!/usr/bin/python3
from PIL import Image
from imutils.video import VideoStream
from imutils.video import FPS
from imutils.object_detection import non_max_suppression
import numpy as np
import argparse
import imutils
import time
import cv2
import pytesseract
from pytesseract import Output
import os
import re
def run():
# Check if Tesseract is installed on the computer
try:
output = os.popen("which tesseract").read()
tesseract_match = re.match("(.*?)(?=\\n)", output)
tesseract_path = tesseract_match.group(1)
pytesseract.pytesseract.tesseract_cmd = tesseract_path
except:
print("You don't seem to have Tesseract installed. Have a look here: https://tesseract-ocr.github.io/tessdoc/Downloads.html")
# Set window and lecture configurations
configs = [
("min_confidence", 0.3),
("width", 288),
("height", 288),
("east", "frozen_east_text_detection.pb"),
]
args = {k: v for k, v in configs}
# Initialize the original frame dimensions, the new frame dimensions and the ratio between the dimensions
(W, H) = (None, None)
(newW, newH) = (args["width"], args["height"])
(rW, rH) = (None, None)
# Define the two output layer names for the EAST detector model that we are interested:
# The first is the output probabilities
# The second can be used to derive the bounding box coordinates of text
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# Load the pre-trained EAST text detector
print("[INFO] Loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])
# Grab the reference to the webcam
print("[INFO] Starting video stream...")
print("[INFO] Type the 'Q' key on the Text Detection Window to end the execution.")
vs = VideoStream(src=0).start()
time.sleep(1.0)
# Start the FPS throughput estimator
fps = FPS().start()
fn = 0 # Frame number
# Loop over frames from the video stream
while True:
# Grab the current frame
frame = vs.read()
# Check to see if we have reached the end of the stream
if frame is None:
break
# Resize the frame, maintaining the aspect ratio
frame = imutils.resize(frame, width=1000)
orig = frame.copy()
# If our frame dimensions are None
# We still need to compute the ratio of old frame dimensions to new frame dimensions
if W is None or H is None:
(H, W) = frame.shape[:2]
rW = W / float(newW)
rH = H / float(newH)
# Resize the frame, this time ignoring aspect ratio
frame = cv2.resize(frame, (newW, newH))
# Construct a blob from the frame and then perform a forward pass
# of the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(
frame,
1.0,
(newW, newH),
(123.68, 116.78, 103.94),
swapRB=True,
crop=False
)
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
# Decode the predictions, then apply non-maxima suppression to
# suppress weak, overlapping bounding boxes
(rects, confidences) = decode_predictions(scores, geometry, args)
boxes = non_max_suppression(np.array(rects), probs=confidences)
# Loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# Scale the bounding box coordinates based on the respective ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
# Draw the bounding box on the frame
cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)
# Update the FPS counter
fps.update()
# Show the output frame on screen
cv2.imshow("Text Detection", orig)
# Print extracted text from the frame if at least 3 characters are identified
pwd = os.getcwd()
cv2.imwrite("{}/image.jpg".format(pwd), get_grayscale(orig))
try:
fn += 1
text = pytesseract.image_to_string(Image.open("{}/image.jpg".format(pwd)), lang = "eng")
val = re.search('[a-zA-Z]{3,}',text)
if val[0].isalpha():
print("\033[1m✅ Extracted Text ({})\033[0m".format(fn))
print(text)
except:
pass
key = cv2.waitKey(1) & 0xFF
# Break the loop if the `q` key is pressed on output frame screen
if key == ord("q"):
break
# Stop the timer and display FPS information
fps.stop()
print("[INFO] Elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] Approx. FPS: {:.2f}".format(fps.fps()))
# Release the webcam pointer
vs.stop()
# Close all windows
cv2.destroyAllWindows()
def get_grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def increase_brightness(img, value=30):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
lim = 255 - value
v[v > lim] = 255
v[v <= lim] += value
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
def decode_predictions(scores, geometry, args):
# Grab the number of rows and columns from the scores volume, then initialize
# a set of bounding box rectangles and corresponding confidence scores
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
# Loop over the number of rows
for y in range(0, numRows):
# Extract the scores (probabilities), followed by the geometrical data
# used to derive potential bounding box coordinates that surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# Loop over the number of columns
for x in range(0, numCols):
# If the score does not have sufficient probability, ignore it
if scoresData[x] < args["min_confidence"]:
continue
# Compute the offset factor as our resulting feature maps will be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
            # Extract the rotation angle for the prediction and then compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# Use the geometry volume to derive the width and height of the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# Compute both the starting and ending (x, y)-coordinates for the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# Add the bounding box coordinates and probability score to our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# Return a tuple of the bounding boxes and associated confidences
return (rects, confidences)
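# Entry point added as a convenience sketch: the module defines run() but never
# invokes it. Running the file directly starts the webcam text-detection loop,
# assuming frozen_east_text_detection.pb sits in the working directory.
if __name__ == "__main__":
    run()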
|
StarcoderdataPython
|
4882546
|
import unittest
import json
import pandas as pd
import numpy as np
from assistant_dialog_skill_analysis.utils import skills_util
from assistant_dialog_skill_analysis.data_analysis import divergence_analyzer
class TestDivergenceAnalyzer(unittest.TestCase):
"""Test for Divergence Analyzer module"""
def setUp(self):
unittest.TestCase.setUp(self)
with open("tests/resources/test_workspaces/skill-Customer-Care-Sample.json",
"r") as skill_file:
workspace_data, workspace_vocabulary = \
skills_util.extract_workspace_data(json.load(skill_file))
self.workspace_df = pd.DataFrame(workspace_data)
self.train_set_pd = pd.DataFrame({'utterance': ['boston is close to new york'],
'intent': ['Boston_New_York']})
self.test_set_pd = pd.DataFrame(
{'utterance': ['both boston and new york are on east coast',
'boston is close to new york'],
'intent': ['Boston_New_York', 'Boston_New_York']})
def test_label_percentage(self):
label_percentage_dict = divergence_analyzer._label_percentage(self.workspace_df)
label_percentage_vec = np.array(list(label_percentage_dict.values()))
self.assertEqual(np.all(label_percentage_vec > 0), True, "label percentage test fail")
self.assertEqual(np.sum(label_percentage_vec), 1, "label percentage test fail")
def test_train_test_vocab_difference(self):
train_vocab, test_vocab = \
divergence_analyzer._train_test_vocab_difference(self.train_set_pd, self.test_set_pd)
self.assertEqual(train_vocab, set(['boston', 'is', 'close', 'to', 'new', 'york']),
"train test vocab difference test fail")
def test_train_test_uttterance_length_difference(self):
temp_df = divergence_analyzer._train_test_utterance_length_difference(self.train_set_pd,
self.test_set_pd)
self.assertEqual(temp_df.iloc[0]['Absolute Difference'],
                         1.5, 'train test utterance length difference test fail')
def test_train_test_label_difference(self):
# Test 1
percentage_dict1 = {'Intent1': .5, 'Intent2': .5}
percentage_dict2 = {'Intent1': .5, 'Intent2': .5}
missing_labels, difference_dict, js_distance = \
divergence_analyzer._train_test_label_difference(percentage_dict1, percentage_dict2)
self.assertEqual(js_distance, 0, "train test difference test fail")
self.assertEqual(missing_labels, [], "train test difference test fail")
self.assertEqual(difference_dict['Intent1'], [50, 50, 0], "train test difference test fail")
# Test 2
percentage_dict1 = {'Intent1': 1, 'Intent2': 0}
percentage_dict2 = {'Intent1': 1}
missing_labels, difference_dict, js_distance = \
divergence_analyzer._train_test_label_difference(percentage_dict1, percentage_dict2)
self.assertEqual(js_distance, 0, "train test difference test fail")
self.assertEqual(missing_labels, ['Intent2'], "train test difference test fail")
self.assertEqual(difference_dict['Intent1'],
[100, 100, 0],
"train test difference test fail")
# Test 3
percentage_dict1 = {'Intent1': 1, 'Intent2': 0}
percentage_dict2 = {'Intent1': 0, 'Intent2': 1}
missing_labels, difference_dict, js_distance = \
divergence_analyzer._train_test_label_difference(percentage_dict1, percentage_dict2)
self.assertEqual(js_distance, 1, "train test difference test fail")
self.assertEqual(difference_dict['Intent1'],
[100, 0, 100],
"train test difference test fail")
self.assertEqual(difference_dict['Intent2'],
[0, 100, 100],
"train test difference test fail")
self.assertEqual(len(missing_labels), 0, "train test difference test fail")
# Test 4
percentage_dict1 = {'Intent1': 1}
percentage_dict2 = {'Intent2': 1}
missing_labels, difference_dict, js_distance = \
divergence_analyzer._train_test_label_difference(percentage_dict1, percentage_dict2)
self.assertEqual(str(js_distance), 'nan', "train test difference test fail")
self.assertEqual(missing_labels, ['Intent1'], "train test difference test fail")
self.assertEqual(len(difference_dict), 0, "train test difference test fail")
def tearDown(self):
unittest.TestCase.tearDown(self)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11332904
|
from dataclasses import dataclass
from shared.paths import RESOURCE_DIR
@dataclass(frozen=True)
class Edge:
src: int
dest: int
weight: int
def find(parent: list[int], x: int) -> int:
while x != parent[x]:
x = parent[x]
return x
def union(parent: list[int], rank: list[int], edge: Edge) -> None:
rx = find(parent, edge.src)
ry = find(parent, edge.dest)
if rx == ry:
return
if rank[rx] > rank[ry]:
parent[ry] = rx
else:
parent[rx] = ry
if rank[rx] == rank[ry]:
rank[ry] += 1
def kruskal(graph: list[Edge], vertices: int) -> list[Edge]:
graph = sorted(graph, key=lambda item: item.weight)
parent = [i for i in range(vertices + 1)]
rank = [0 for _ in range(vertices + 1)]
a = []
for edge in graph:
if find(parent, edge.src) != find(parent, edge.dest):
a.append(edge)
union(parent, rank, edge)
return a
def read_file(path: str) -> list[Edge]:
with open(path, 'r') as f:
return [Edge(i, j, int(weight))
for i, line in enumerate(f)
for j, weight in enumerate(line.rstrip().split(','))
if weight.isnumeric()]
def find_max_saving(graph: list[Edge], vertices: int) -> int:
tree = kruskal(graph, vertices)
start_weight = sum(edge.weight for edge in graph) // 2
final_weight = sum(edge.weight for edge in tree)
return start_weight - final_weight
def main():
graph = read_file(RESOURCE_DIR / 'problem_107_network.txt')
vertices = 40
savings = find_max_saving(graph, vertices)
print(savings)
if __name__ == "__main__":
main()
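# A quick ad-hoc check of kruskal() on a hand-built triangle graph (a sketch that
# does not need the resource file used by main()); the MST of a triangle keeps
# the two cheapest edges:
#   >>> triangle = [Edge(0, 1, 1), Edge(1, 2, 2), Edge(0, 2, 3)]
#   >>> sorted(edge.weight for edge in kruskal(triangle, vertices=3))
#   [1, 2]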
|
StarcoderdataPython
|
5094347
|
# pylint: disable=W0614,wildcard-import
"""
A hack to be able to load easy_thumbnails templatetags using
{% load easy_thumbnails %}
instead of
{% load thumbnail %}.
The reason for doing this is that sorl.thumbnail and easy_thumbnails both
name their templatetag module 'thumbnail' and one gets conflicts.
The reason for using both thumbnail libraries is that they are used in different apps on pypi.
"""
from easy_thumbnails.templatetags.thumbnail import * # noqa: F401, F403
|
StarcoderdataPython
|
11212654
|
#! /usr/local/bin/python
import sys
import os
import re
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This ProcessError.py file as part of tiny-ci will #
# Be run for every failed test #
# #
# It is up to the user to decide how the error should #
# be handled in this script #
# #
# By default the script is set up to send an email #
# with the error message (although the SMTP settings  #
# first needs to be filled in) #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Here is the available information about the failure #
testName = sys.argv[1]
errorMessage = sys.argv[2]
# SMTP settings #
SMTPserver = ''
sender = ''
destination = ['']
USERNAME = ""
PASSWORD = ""
text_subtype = 'plain'
content="""\
A tiny-ci test has failed\n
""" + errorMessage
subject="Failed tiny-ci test '" + testName + "'";
## SEND THE EMAIL ##
from smtplib import SMTP_SSL as SMTP
from email.MIMEText import MIMEText
try:
msg = MIMEText(content, text_subtype)
msg['Subject'] = subject
msg['From'] = sender
conn = SMTP(SMTPserver)
conn.set_debuglevel(False)
conn.login(USERNAME, PASSWORD)
try:
conn.sendmail(sender, destination, msg.as_string())
finally:
conn.close()
except Exception, exc:
sys.exit( "mail failed; %s" % str(exc) )
|
StarcoderdataPython
|
1846922
|
from django.core.management.base import BaseCommand, CommandError
from tournaments.models import Tournament
import os
import csv
class Command(BaseCommand):
help = 'Load Projected Tournament Winnings'
def handle(self, *args, **options):
players_tournament_obj = Tournament.objects.get(name = 'The PLAYERS Championship')
os.chdir(os.path.join(os.getcwd(), 'external_files'))
csv_file_name = 'The PLAYERS Projected Winnings.csv'
winnings = []
with open(csv_file_name, encoding = 'utf-8-sig') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
winnings.append(row['Projected'])
players_tournament_obj.projected_winnings = str(winnings)
players_tournament_obj.save(update_fields = ['projected_winnings'])
self.stdout.write(self.style.SUCCESS('Successfully loaded projected winnings'))
|
StarcoderdataPython
|
1793184
|
<reponame>MateuszG/django_auth
import pytest
import tempfile
import os
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp() # '/tmp/tmpKUnnz2'
os.chdir(newpath)
@pytest.mark.usefixtures("cleandir")
class TestDirectoryInit:
def test_cwd_starts_empty(self):
assert os.listdir(os.getcwd()) == []
with open("myfile", "w") as f:
f.write("hello")
assert os.listdir(os.getcwd()) == ['myfile']
def test_cwd_again_starts_empty(self):
assert os.listdir(os.getcwd()) == []
|
StarcoderdataPython
|
3337478
|
import PIL
from PIL import Image
import src.pos as pos
class ImageMaker:
"""
This is a class for making Binary PFPs.
Attributes:
color (str): The color of the PFP.
"""
def __init__(self):
"""
Initializes the ImageMaker class.
Parameters:
None
Returns:
None. Only usable functions.
"""
def basic_pfp_maker(self, color):
"""
Makes a 64x64 image using the function in src\pos.py.
It uses the function in src\pos.py to make a 64x64 image
of the 64x64 array of 0 and 1 integers. It then converts the
image to a PIL image and saves it as a PNG file.
        Parameters:
            color (str): The color of the PFP tiles (e.g. 'red').
Returns:
None, just saves an image.
"""
color = color.lower()
bg = Image.open('./assets/imgs/64-64.png')
clr_asset = Image.open('./assets/imgs/' + color + '.png')
white = Image.open('./assets/imgs/white.png')
positions = pos.create_pos_lists()
for i in range(len(positions)):
for j in range(len(positions[i])):
if positions[i][j] == 1:
bg.paste(clr_asset, (j * 8, i * 8))
else:
bg.paste(white, (j * 8, i * 8))
bg.save('profile.png')
def alternating_pfp_maker(self, color_1, color_2):
"""
Makes a 64x64 image using the function in src\pos.py.
It uses the function in src\pos.py to make a 64x64 image
of the 64x64 array of 0 and 1 integers. It then converts the
image to a PIL image and saves it as a PNG file.
The only exception is that the image doesn't have the same color,
it has alternating rows of colors.
        Parameters:
            color_1 (str): First color of the alternating pattern.
            color_2 (str): Second color of the alternating pattern.
Returns:
None, just saves an image.
"""
bg = Image.open('./assets/imgs/64-64.png')
clr_asset_1 = Image.open('./assets/imgs/' + color_1 + '.png')
clr_asset_2 = Image.open('./assets/imgs/' + color_2 + '.png')
        white = Image.open('./assets/imgs/white.png')
positions = pos.create_pos_lists()
for i in range(len(positions)):
for j in range(len(positions[i])):
if i % 2 == 0 and positions[i][j] == 1:
bg.paste(clr_asset_1, (j * 8, i * 8))
elif i % 2 == 0 or positions[i][j] == 1:
bg.paste(white, (j * 8, i * 8))
else:
bg.paste(clr_asset_2, (j * 8, i * 8))
bg.save('profile2.png')
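# A minimal usage sketch (assumption: the ./assets/imgs/ colour tiles referenced
# above exist for the chosen names, e.g. red.png and blue.png); it writes
# profile.png and profile2.png into the current working directory.
if __name__ == '__main__':
    maker = ImageMaker()
    maker.basic_pfp_maker('red')
    maker.alternating_pfp_maker('red', 'blue')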
|
StarcoderdataPython
|
5137570
|
import numpy as np
import support.data_preprocessing as dpre
data = np.array([[18, 11, 5, 13, 14, 9, 9, 22, 26, 24,
19, 0, 14, 15, 8, 16, 8, 8, 16, 7,
11, 10, 20, 18, 15, 14, 49, 10, 16, 18,
8, 5, 9, 7, 13, 0, 7, 4, 11, 10],
[11, 14, 15, 18, 11, 13, 22, 11, 10, 4,
41, 65, 57, 38, 20, 62, 44, 15, 10, 47,
24, 17, 0, 9, 39, 13, 11, 12, 21, 19,
9, 15, 33, 8, 8, 7, 13, 2, 23, 12],
[1, 0, 1, 1, 0, 4, 0, 3, 4, 4,
1, 0, 1, 6, 8, 6, 4, 5, 1, 2,
3, 8, 8, 6, 0, 2, 2, 3, 2, 4,
3, 3, 0, 3, 5, 4, 2, 5, 1, 4]])
train_X_data = np.array([[18, 11, 5, 13, 14, 9, 9, 22, 26, 24,
19, 0, 14, 15, 8, 16, 8, 8, 16, 7,
11, 10, 20, 18, 15, 14, 49, 10, 16, 18],
[11, 14, 15, 18, 11, 13, 22, 11, 10, 4,
41, 65, 57, 38, 20, 62, 44, 15, 10, 47,
24, 17, 0, 9, 39, 13, 11, 12, 21, 19],
[1, 0, 1, 1, 0, 4, 0, 3, 4, 4,
1, 0, 1, 6, 8, 6, 4, 5, 1, 2,
3, 8, 8, 6, 0, 2, 2, 3, 2, 4]])
train_y_data = np.array([[8, 5, 9, 7, 13, 0, 7, 4, 11, 10],
[9, 15, 33, 8, 8, 7, 13, 2, 23, 12],
[3, 3, 0, 3, 5, 4, 2, 5, 1, 4]])
test_data = np.array([[18, 11, 5, 13, 14, 9, 9, 22, 26, 24,
19, 0, 14, 15, 8, 16, 8, 8, 16, 7,
11, 10, 20, 18, 15, 14, 49, 10, 16, 18,
8, 5, 9, 7, 13, 0, 7, 4, 11, 10,
4, 8, 19, 6, 7, 12, 7, 14, 5, 9],
[11, 14, 15, 18, 11, 13, 22, 11, 10, 4,
41, 65, 57, 38, 20, 62, 44, 15, 10, 47,
24, 17, 0, 9, 39, 13, 11, 12, 21, 19,
9, 15, 33, 8, 8, 7, 13, 2, 23, 12,
4, 1, 0, 9, 3, 10, 6, 12, 21, 9],
[1, 0, 1, 1, 0, 4, 0, 3, 4, 4,
1, 0, 1, 6, 8, 6, 4, 5, 1, 2,
3, 8, 8, 6, 0, 2, 2, 3, 2, 4,
3, 3, 0, 3, 5, 4, 2, 5, 1, 4,
2, 1, 1, 3, 6, 2, 5, 3, 7, 3]])
test_X_data = np.array([[19, 0, 14, 15, 8, 16, 8, 8, 16, 7,
11, 10, 20, 18, 15, 14, 49, 10, 16, 18,
8, 5, 9, 7, 13, 0, 7, 4, 11, 10],
[41, 65, 57, 38, 20, 62, 44, 15, 10, 47,
24, 17, 0, 9, 39, 13, 11, 12, 21, 19,
9, 15, 33, 8, 8, 7, 13, 2, 23, 12],
[1, 0, 1, 6, 8, 6, 4, 5, 1, 2,
3, 8, 8, 6, 0, 2, 2, 3, 2, 4,
3, 3, 0, 3, 5, 4, 2, 5, 1, 4]])
test_y_data = np.array([[4, 8, 19, 6, 7, 12, 7, 14, 5, 9],
[4, 1, 0, 9, 3, 10, 6, 12, 21, 9],
[2, 1, 1, 3, 6, 2, 5, 3, 7, 3]])
def test_normalise():
norm_data = dpre.normalise_transform(data)
# normalising should not change data shape
assert norm_data.shape == data.shape
reverse_data = dpre.normalise_reverse(norm_data)
# normalise_reverse should be able to turn normalised data back to original data
assert np.amax(abs(reverse_data - data)) < 0.00001
def test_split():
    # assumption: split_data is expected to live in support.data_preprocessing,
    # alongside the normalise helpers exercised above
    train_X, train_y, test_X, test_y = dpre.split_data(data, test_data, pred_days=10)
    # split_data function should be able to split data properly
    assert np.array_equal(train_X, train_X_data)
    assert np.array_equal(train_y, train_y_data)
    assert np.array_equal(test_X, test_X_data)
    assert np.array_equal(test_y, test_y_data)
|
StarcoderdataPython
|
5089033
|
from collections import defaultdict
import json
from typing import Any
from zipfile import ZipFile
import geopandas as gpd
import pandera
from pandera import DataFrameSchema, Column, Check, Index
import pandas as pd
def concatenate_local_authority_floor_areas(upstream: Any, product: Any) -> None:
dcc = pd.read_excel(upstream["download_valuation_office_floor_areas_dcc"])
dlrcc = pd.read_excel(upstream["download_valuation_office_floor_areas_dlrcc"])
sdcc = pd.read_excel(upstream["download_valuation_office_floor_areas_sdcc"])
fcc = pd.read_excel(upstream["download_valuation_office_floor_areas_fcc"])
dublin = pd.concat([dcc, dlrcc, sdcc, fcc])
dublin.to_csv(product, index=False)
def validate_dublin_floor_areas(product: Any) -> None:
dublin_floor_areas = pd.read_csv(product)
schema = DataFrameSchema(
columns={
"PropertyNo": Column(
dtype=pandera.engines.numpy_engine.Int64,
checks=[
Check.greater_than_or_equal_to(min_value=272845.0),
Check.less_than_or_equal_to(max_value=5023334.0),
],
nullable=False,
unique=False,
coerce=False,
required=True,
regex=False,
),
"County": Column(
dtype=pandera.engines.numpy_engine.Object,
checks=None,
nullable=False,
unique=False,
coerce=False,
required=True,
regex=False,
),
"LA": Column(
dtype=pandera.engines.numpy_engine.Object,
checks=None,
nullable=False,
unique=False,
coerce=False,
required=True,
regex=False,
),
"Category": Column(
dtype=pandera.engines.numpy_engine.Object,
checks=None,
nullable=False,
unique=False,
coerce=False,
required=True,
regex=False,
),
"Use1": Column(
dtype=pandera.engines.numpy_engine.Object,
checks=None,
nullable=True,
unique=False,
coerce=False,
required=True,
regex=False,
),
"Use2": Column(
dtype=pandera.engines.numpy_engine.Object,
checks=None,
nullable=True,
unique=False,
coerce=False,
required=True,
regex=False,
),
"List_Status": Column(
dtype=pandera.engines.numpy_engine.Object,
checks=None,
nullable=False,
unique=False,
coerce=False,
required=True,
regex=False,
),
"Total_SQM": Column(
dtype=pandera.engines.numpy_engine.Float64,
checks=[
Check.greater_than_or_equal_to(min_value=0.0),
Check.less_than_or_equal_to(max_value=5373112.83),
],
nullable=False,
unique=False,
coerce=False,
required=True,
regex=False,
),
"X_ITM": Column(
dtype=pandera.engines.numpy_engine.Float64,
checks=[
Check.greater_than_or_equal_to(min_value=599999.999),
Check.less_than_or_equal_to(max_value=729666.339),
],
nullable=True,
unique=False,
coerce=False,
required=True,
regex=False,
),
"Y_ITM": Column(
dtype=pandera.engines.numpy_engine.Float64,
checks=[
Check.greater_than_or_equal_to(min_value=716789.52),
Check.less_than_or_equal_to(max_value=4820966.962),
],
nullable=True,
unique=False,
coerce=False,
required=True,
regex=False,
),
},
index=Index(
dtype=pandera.engines.numpy_engine.Int64,
checks=[
Check.greater_than_or_equal_to(min_value=0.0),
Check.less_than_or_equal_to(max_value=53285.0),
],
nullable=False,
coerce=False,
name=None,
),
coerce=True,
strict=False,
name=None,
)
schema(dublin_floor_areas)
def convert_benchmark_uses_to_json(upstream: Any, product: Any) -> None:
uses_grouped_by_category = defaultdict()
with ZipFile(upstream["download_benchmark_uses"]) as zf:
for filename in zf.namelist():
name = filename.split("/")[-1].replace(".txt", "")
with zf.open(filename, "r") as f:
uses_grouped_by_category[name] = [
line.rstrip().decode("utf-8") for line in f
]
benchmark_uses = {i: k for k, v in uses_grouped_by_category.items() for i in v}
with open(product, "w") as f:
json.dump(benchmark_uses, f)
def weather_adjust_benchmarks(upstream: Any, product: Any) -> None:
benchmarks = pd.read_csv(upstream["download_benchmarks"])
# 5y average for Dublin Airport from 2015 to 2020
dublin_degree_days = 2175
tm46_degree_days = 2021
degree_day_factor = dublin_degree_days / tm46_degree_days
weather_dependent_electricity = (
benchmarks["Typical electricity [kWh/m²y]"]
* benchmarks["% electricity pro-rated to degree days"]
* degree_day_factor
)
weather_independent_electricity = benchmarks["Typical electricity [kWh/m²y]"] * (
1 - benchmarks["% electricity pro-rated to degree days"]
)
electricity = weather_dependent_electricity + weather_independent_electricity
# ASSUMPTION: space heat is the only electrical heat
electricity_heat = (
weather_dependent_electricity * benchmarks["% suitable for DH or HP"]
)
weather_dependent_fossil_fuel = (
benchmarks["Typical fossil fuel [kWh/m²y]"]
* benchmarks["% fossil fuel pro-rated to degree days"]
* degree_day_factor
)
weather_independent_fossil_fuel = benchmarks["Typical fossil fuel [kWh/m²y]"] * (
1 - benchmarks["% fossil fuel pro-rated to degree days"]
)
fossil_fuel = weather_dependent_fossil_fuel + weather_independent_fossil_fuel
# ASSUMPTION: fossil fuel is only used for space heat & hot water
fossil_fuel_heat = fossil_fuel * benchmarks["% suitable for DH or HP"]
industrial_low_temperature_heat = (
benchmarks["Industrial space heat [kWh/m²y]"] * degree_day_factor
+ benchmarks["Industrial process energy [kWh/m²y]"]
* benchmarks["% suitable for DH or HP"]
)
industrial_high_temperature_heat = benchmarks[
"Industrial process energy [kWh/m²y]"
] * (1 - benchmarks["% suitable for DH or HP"])
normalised_benchmarks = pd.DataFrame(
{
"Benchmark": benchmarks["Benchmark"],
"typical_area_m2": benchmarks["Typical Area [m²]"],
"area_upper_bound_m2": benchmarks["Area Upper Bound [m²]"],
"typical_electricity_kwh_per_m2y": electricity,
"typical_fossil_fuel_kwh_per_m2y": fossil_fuel,
"typical_building_energy_kwh_per_m2y": benchmarks[
"Industrial building total [kWh/m²y]"
],
"typical_process_energy_kwh_per_m2y": benchmarks[
"Industrial process energy [kWh/m²y]"
],
"typical_electricity_heat_kwh_per_m2y": electricity_heat,
"typical_fossil_fuel_heat_kwh_per_m2y": fossil_fuel_heat,
"typical_industrial_low_temperature_heat_kwh_per_m2y": industrial_low_temperature_heat,
"typical_industrial_high_temperature_heat_kwh_per_m2y": industrial_high_temperature_heat,
}
)
normalised_benchmarks.to_csv(product, index=False)
def replace_unexpectedly_large_floor_areas_with_typical_values(
upstream: Any, product: Any
) -> None:
buildings = pd.read_csv(upstream["concatenate_local_authority_floor_areas"])
benchmarks = pd.read_csv(upstream["weather_adjust_benchmarks"])
with open(upstream["convert_benchmark_uses_to_json"], "r") as f:
benchmark_uses = json.load(f)
buildings["Benchmark"] = (
buildings["Use1"].map(benchmark_uses).rename("Benchmark").fillna("Unknown")
)
buildings_with_benchmarks = buildings.merge(benchmarks)
bounded_area_m2 = buildings_with_benchmarks["Total_SQM"].rename("bounded_area_m2")
typical_area = buildings_with_benchmarks["typical_area_m2"]
greater_than_zero_floor_area = buildings_with_benchmarks["Total_SQM"] > 0
greater_than_typical_benchmark_upper_bound = (
buildings_with_benchmarks["Total_SQM"]
> buildings_with_benchmarks["area_upper_bound_m2"]
)
valid_benchmark = ~buildings_with_benchmarks["Benchmark"].isin(["Unknown", "None"])
area_is_greater_than_expected = (
greater_than_zero_floor_area
& greater_than_typical_benchmark_upper_bound
& valid_benchmark
)
bounded_area_m2.loc[area_is_greater_than_expected] = typical_area.loc[
area_is_greater_than_expected
]
propertyno_bounded_area_map = pd.concat(
[buildings["PropertyNo"], bounded_area_m2], axis=1
)
propertyno_bounded_area_map.to_csv(product, index=False)
def save_unknown_benchmark_uses(upstream: Any, product: Any) -> None:
buildings = pd.read_csv(upstream["concatenate_local_authority_floor_areas"])
benchmarks = pd.read_csv(upstream["weather_adjust_benchmarks"])
with open(upstream["convert_benchmark_uses_to_json"], "r") as f:
benchmark_uses = json.load(f)
buildings["Benchmark"] = (
buildings["Use1"].map(benchmark_uses).rename("Benchmark").fillna("Unknown")
)
buildings_with_benchmarks = buildings.merge(benchmarks)
benchmark_is_unknown = buildings_with_benchmarks["Benchmark"] == "Unknown"
unknown_benchmark_uses = pd.Series(
buildings_with_benchmarks.loc[benchmark_is_unknown, "Use1"].unique(),
name="Use1",
)
unknown_benchmark_uses.to_csv(product, index=False)
def apply_energy_benchmarks_to_floor_areas(
upstream: Any, product: Any, boiler_efficiency: float
) -> None:
buildings = pd.read_csv(upstream["concatenate_local_authority_floor_areas"])
benchmarks = pd.read_csv(upstream["weather_adjust_benchmarks"])
with open(upstream["convert_benchmark_uses_to_json"], "r") as f:
benchmark_uses = json.load(f)
buildings["Benchmark"] = (
buildings["Use1"].map(benchmark_uses).rename("Benchmark").fillna("Unknown")
)
buildings_with_benchmarks = buildings.merge(benchmarks)
# Replace invalid floor areas with typical values
bounded_area_m2 = buildings_with_benchmarks["Total_SQM"].rename("bounded_area_m2")
greater_than_zero_floor_area = buildings_with_benchmarks["Total_SQM"] > 0
greater_than_typical_benchmark_upper_bound = (
buildings_with_benchmarks["Total_SQM"]
> buildings_with_benchmarks["area_upper_bound_m2"]
)
valid_benchmark = ~buildings_with_benchmarks["Benchmark"].isin(["Unknown", "None"])
area_is_greater_than_expected = (
greater_than_zero_floor_area
& greater_than_typical_benchmark_upper_bound
& valid_benchmark
)
bounded_area_m2.loc[area_is_greater_than_expected] = buildings_with_benchmarks[
"typical_area_m2"
].loc[area_is_greater_than_expected]
buildings_with_benchmarks["bounded_area_m2"] = bounded_area_m2
# Apply Benchmarks
kwh_to_mwh = 1e-3
buildings_with_benchmarks["electricity_demand_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks["typical_electricity_kwh_per_m2y"].fillna(0)
* kwh_to_mwh
).fillna(0)
buildings_with_benchmarks["fossil_fuel_demand_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks["typical_fossil_fuel_kwh_per_m2y"].fillna(0)
* kwh_to_mwh
* boiler_efficiency
).fillna(0)
buildings_with_benchmarks["building_energy_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks["typical_building_energy_kwh_per_m2y"].fillna(0)
* kwh_to_mwh
)
buildings_with_benchmarks["process_energy_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks["typical_process_energy_kwh_per_m2y"].fillna(0)
* kwh_to_mwh
)
buildings_with_benchmarks["electricity_heat_demand_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks["typical_electricity_heat_kwh_per_m2y"].fillna(0)
* kwh_to_mwh
* boiler_efficiency
).fillna(0)
buildings_with_benchmarks["fossil_fuel_heat_demand_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks["typical_fossil_fuel_heat_kwh_per_m2y"].fillna(0)
* kwh_to_mwh
* boiler_efficiency
).fillna(0)
buildings_with_benchmarks["industrial_low_temperature_heat_demand_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks[
"typical_industrial_low_temperature_heat_kwh_per_m2y"
].fillna(0)
* kwh_to_mwh
)
buildings_with_benchmarks["industrial_high_temperature_heat_demand_mwh_per_y"] = (
bounded_area_m2.fillna(0)
* buildings_with_benchmarks[
"typical_industrial_high_temperature_heat_kwh_per_m2y"
].fillna(0)
* kwh_to_mwh
)
buildings_with_benchmarks.to_csv(product, index=False)
def link_valuation_office_to_small_areas(upstream: Any, product: Any) -> None:
valuation_office = pd.read_csv(upstream["apply_energy_benchmarks_to_floor_areas"])
small_area_boundaries = gpd.read_file(
str(upstream["download_small_area_boundaries"])
)
valuation_office_geo = gpd.GeoDataFrame(
valuation_office,
geometry=gpd.points_from_xy(
x=valuation_office["X_ITM"], y=valuation_office["Y_ITM"], crs="EPSG:2157"
),
)
valuation_office_in_small_areas = gpd.sjoin(
valuation_office_geo,
small_area_boundaries[["small_area", "geometry"]],
op="within",
).drop(columns=["geometry", "index_right"])
valuation_office_in_small_areas.to_csv(product, index=False)
def remove_none_and_unknown_benchmark_buildings(upstream: Any, product: Any) -> None:
buildings = pd.read_csv(upstream["link_valuation_office_to_small_areas"])
without_none_or_unknown_benchmarks = buildings.query(
"Benchmark != ['Unknown', 'None']"
)
without_none_or_unknown_benchmarks.to_csv(product, index=None)
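# A minimal sketch of invoking one task by hand (assumption: these functions are
# normally driven by a pipeline runner that maps `upstream` names to file paths;
# the paths below are placeholders, not part of the original pipeline).
if __name__ == "__main__":
    weather_adjust_benchmarks(
        upstream={"download_benchmarks": "data/benchmarks.csv"},
        product="data/weather_adjusted_benchmarks.csv",
    )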
|
StarcoderdataPython
|
3568832
|
'''
Create a program that reads a number and show its multiplication table
'''
number = int(input('Type a number: '))
print('-' * 12)
print(f'\033[34m{number}\033[m x \033[34m{1:>2}\033[m = \033[32m{number * 1}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{2:>2}\033[m = \033[32m{number * 2}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{3:>2}\033[m = \033[32m{number * 3}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{4:>2}\033[m = \033[32m{number * 4}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{5:>2}\033[m = \033[32m{number * 5}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{6:>2}\033[m = \033[32m{number * 6}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{7:>2}\033[m = \033[32m{number * 7}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{8:>2}\033[m = \033[32m{number * 8}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{9:>2}\033[m = \033[32m{number * 9}\033[m')
print(f'\033[34m{number}\033[m x \033[34m{10}\033[m = \033[32m{number * 10}\033[m')
print('-' * 12)
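# The same table produced with a loop instead of ten print calls (an equivalent
# sketch, kept as a comment so the exercise above stays untouched):
# for i in range(1, 11):
#     print(f'\033[34m{number}\033[m x \033[34m{i:>2}\033[m = \033[32m{number * i}\033[m')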
|
StarcoderdataPython
|
1772215
|
<filename>script.mrknow.urlresolver/lib/urlresolver9/plugins/mailru.py
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import json
import urllib
from lib import helpers
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class MailRuResolver(UrlResolver):
name = "mail.ru"
domains = ['mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru', 'api.video.mail.ru']
pattern = '(?://|\.)(mail\.ru)/.+?/(?:embed/|)(inbox|mail|embed)/(?:(.+?)/.+?/)?(\d+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
print host
print media_id
response = self.net.http_GET(web_url)
html = response.content
if html:
try:
js_data = json.loads(html)
sources = [(video['key'], video['url']) for video in js_data['videos']]
sources = sources[::-1]
source = helpers.pick_source(sources)
source = source.encode('utf-8')
return source + helpers.append_headers({'Cookie': response.get_headers(as_dict=True).get('Set-Cookie', '')})
except:
raise ResolverError('No playable video found.')
else:
raise ResolverError('No playable video found.')
def get_url(self, host, media_id):
location, user, media_id = media_id.split('|')
if user == 'None':
try:
web_url = 'https://my.mail.ru/video/embed/%s' % media_id
response = self.net.http_GET(web_url)
html = response.content.encode('utf-8')
media_id = re.search(r'[\"\']movieSrc[\"\']\s?:\s?[\"\'](.*?)[\"\']', html).groups()[0]
return 'http://videoapi.my.mail.ru/videos/%s.json?ver=0.2.60' % (media_id)
except:
raise ResolverError('No playable video found.')
else: return 'http://videoapi.my.mail.ru/videos/%s/%s/_myvideo/%s.json?ver=0.2.60' % (location, user, media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return (r.groups()[0], '%s|%s|%s' % (r.groups()[1], r.groups()[2], r.groups()[3]))
else:
return False
|
StarcoderdataPython
|
304396
|
# Copyright (C) 2010
# Author: <NAME>
# Contact: <<EMAIL>>
__version__ = '1.5.2'
__all__ = [
'OpenSSL',
'ecc',
'cipher',
'hash',
]
from .openssl import OpenSSL
from .ecc import ECC
from .cipher import Cipher
from .hash import hmac_sha256, hmac_sha512, pbkdf2
|
StarcoderdataPython
|
8138719
|
<reponame>DazEB2/SimplePyScripts<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/madmaze/pytesseract
import re
# pip install pillow
from PIL import Image
# pip install pytesseract
# Tesseract.exe from https://github.com/UB-Mannheim/tesseract/wiki
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
# Simple image to string
img = Image.open('test.jpg')
text = pytesseract.image_to_string(img, lang='eng')
text = re.sub(r'(\s){2,}', r'\1', text)
print(text)
# At this Time. two Great Empires struggled
# for Dominion over Ivalice:Archadia in the East. Rozarria. the West.
# AIChdadladT he Invasion of the Kingdom of NabradiawasArchadia's first Step RRs s. westward M Cigale
# abradiaWith Cord Rasler's area omeland con rit aNthe Hell-Fires of War. it aa TaeWa eeiad Velma Veerwould soon mete out a like Fate to Valmasca.
# KOZarrlaThe Fall of the Fortress at Nalbina
# tolled the Destruction of the greater
# part of Dalmasca’'s f orces.
|
StarcoderdataPython
|
6570224
|
<filename>blender/2.79/scripts/addons/presets/operator/mesh.primitive_round_cube_add/Capsule.py
import bpy
op = bpy.context.active_operator
op.radius = 0.5
op.arc_div = 8
op.lin_div = 0
op.size = (0.0, 0.0, 3.0)
op.div_type = 'CORNERS'
|
StarcoderdataPython
|
1843494
|
from stream_framework.feeds.aggregated_feed.cassandra import CassandraAggregatedFeed
from stream_framework.feeds.notification_feed.base import BaseNotificationFeed
from stream_framework.storage.redis.lists_storage import RedisListsStorage
from lego.apps.feed.activities import Activity, AggregatedActivity, NotificationActivity
from lego.apps.feed.aggregator import FeedAggregator
from lego.apps.feed.feed_models import AggregatedActivityModel
from lego.apps.feed.feed_serializers import AggregatedActivitySerializer
class AggregatedFeed(CassandraAggregatedFeed):
"""
Aggregated feed. Group activities by type.
Usage:
* Set the column_family used to store the feed in cassandra
timeline_cf_name = ''
"""
key_format = '%(user_id)s'
timeline_model = AggregatedActivityModel
timeline_serializer = AggregatedActivitySerializer
aggregator_class = FeedAggregator
activity_class = Activity
aggregated_activity_class = AggregatedActivity
class NotificationFeed(AggregatedFeed, BaseNotificationFeed):
"""
Track read/seen states on an aggregated feed.
"""
markers_storage_class = RedisListsStorage
aggregated_activity_class = NotificationActivity
|
StarcoderdataPython
|
93510
|
# -*- coding: utf-8 -*-
"""
threaded_ping_server.py
~~~~~~~~~~~~~~~~~~~~~~~
TCP server based on threads simulating ping output.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import logging
import select
import socket
import sys
import threading
import time
from contextlib import closing
ping_output = '''
greg@debian:~$ ping 10.0.2.15
PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms
64 bytes from 10.0.2.15: icmp_req=8 ttl=64 time=0.056 ms
'''
def ping_sim_tcp_server(server_port, ping_ip, client, address):
_, client_port = address
logger = logging.getLogger('threaded.ping.tcp-server({} -> {})'.format(server_port,
client_port))
logger.debug('connection accepted - client at tcp://{}:{}'.format(*address))
ping_out = ping_output.replace("10.0.2.15", ping_ip)
ping_lines = ping_out.splitlines(True)
with closing(client):
for ping_line in ping_lines:
data = ping_line.encode(encoding='utf-8')
try:
client.sendall(data)
except socket.error: # client is gone
break
time.sleep(1) # simulate delay between ping lines
logger.info('Connection closed')
def server_loop(server_port, server_socket, ping_ip, done_event):
logger = logging.getLogger('threaded.ping.tcp-server({})'.format(server_port))
while not done_event.is_set():
# without select we can't break loop from outside (via done_event)
# since .accept() is blocking
read_sockets, _, _ = select.select([server_socket], [], [], 0.1)
if not read_sockets:
continue
client_socket, client_addr = server_socket.accept()
client_socket.setblocking(1)
client_thread = threading.Thread(target=ping_sim_tcp_server,
args=(server_port, ping_ip,
client_socket, client_addr))
client_thread.start()
logger.debug("Ping Sim: ... bye")
def start_ping_sim_server(server_address, ping_ip):
"""Run server simulating ping command output, this is one-shot server"""
_, server_port = server_address
logger = logging.getLogger('threaded.ping.tcp-server({})'.format(server_port))
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(server_address)
server_socket.listen(1)
logger.debug("Ping Sim started at tcp://{}:{}".format(*server_address))
done_event = threading.Event()
server_thread = threading.Thread(target=server_loop,
args=(server_port, server_socket, ping_ip,
done_event))
server_thread.start()
return server_thread, done_event
def tcp_connection(address, moler_conn):
"""Forwarder reading from tcp network transport layer"""
logger = logging.getLogger('threaded.tcp-connection({}:{})'.format(*address))
logger.debug('... connecting to tcp://{}:{}'.format(*address))
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(address)
with closing(client_socket):
while True:
data = client_socket.recv(128)
if data:
logger.debug('<<< {!r}'.format(data))
# Forward received data into Moler's connection
moler_conn.data_received(data)
yield data
else:
logger.debug("... closed")
break
def start_ping_servers(servers_addr):
servers = []
for address, ping_ip in servers_addr:
# simulate pinging given IP
server_thread, server_done = start_ping_sim_server(address, ping_ip)
servers.append((server_thread, server_done))
return servers
def stop_ping_servers(servers):
for server_thread, server_done in servers:
server_done.set()
server_thread.join()
# ==============================================================================
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s |%(name)-40s |%(message)s',
datefmt='%H:%M:%S',
stream=sys.stderr,
)
connections2serve = [(('localhost', 5671), '10.0.2.15'),
(('localhost', 5672), '10.0.2.16')]
servers = start_ping_servers(connections2serve)
time.sleep(2)
stop_ping_servers(servers)
|
StarcoderdataPython
|
343366
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""
Federated feature engineering client-side
support postive_ratio, woe, iv, ks, auc
"""
import logging
from . import metrics_client as mc
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class FederatedFeatureEngineeringClient(object):
"""
Federated feature engineering client-side implementation
"""
def __init__(self, key_len=1024):
"""
init paillier instance with given key_len
"""
self._paillier = mc.hu.Paillier()
self._paillier.keygen(key_len)
logger.info('keygen done, key_len is {} bits'.format(key_len))
def connect(self, channel):
"""
client init the grpc channel with server
"""
self._channel = channel
def get_positive_ratio(self, labels):
"""
return postive ratio to client
params:
labels: a list in the shape of (sample_size, 1)
labels[i] is either 0 or 1, represents negative and positive resp.
e.g. [[1], [0], [1],...,[1]]
return:
a positive ratio list including feature_size dicts
each dict represents the positive ratio (float) of each feature value
e.g. [{0: 0.2, 1: 0.090909}, {1: 0.090909, 0: 0.2, 2: 0.02439}...]
"""
return mc.get_mpc_postive_ratio_alice(self._channel, labels, self._paillier)
def get_woe(self, labels):
"""
return woe to client
params:
labels: a list in the shape of (sample_size, 1)
labels[i] is either 0 or 1, represents negative and positive resp.
e.g. [[1], [0], [1],...,[1]]
return:
a woe list including feature_size dicts
each dict represents the woe (float) of each feature value
e.g. [{1: 0.0, 0: 0.916291}, {2: -1.386294, 1: 0.0, 0: 0.916291}]
"""
return mc.get_mpc_woe_alice(self._channel, labels, self._paillier)
def get_iv(self, labels):
"""
return iv to client
params:
labels: a list in the shape of (sample_size, 1)
labels[i] is either 0 or 1, represents negative and positive resp.
e.g. [[1], [0], [1],...,[1]]
return:
a list corresponding to the iv of each feature
e.g. [0.56653, 0.56653]
"""
return mc.get_mpc_iv_alice(self._channel, labels, self._paillier)
def get_woe_iv(self, labels):
"""
return woe, iv to client
params:
labels: a list in the shape of (sample_size, 1)
labels[i] is either 0 or 1, represents negative and positive resp.
e.g. [[1], [0], [1],...,[1]]
return:
a tuple of woe and iv
"""
return mc.get_mpc_iv_alice(self._channel, labels, self._paillier, True)
def get_ks(self, labels):
"""
        return ks to client
params:
labels: a list in the shape of (sample_size, 1)
labels[i] is either 0 or 1, represents negative and positive resp.
e.g. [[1], [0], [1],...,[1]]
return:
a list corresponding to the ks of each feature
e.g. [0.3, 0.3]
"""
return mc.get_mpc_ks_alice(self._channel, labels, self._paillier)
def get_auc(self, labels):
"""
        return auc to client
params:
labels: a list in the shape of (sample_size, 1)
labels[i] is either 0 or 1, represents negative and positive resp.
e.g. [[1], [0], [1],...,[1]]
return:
a list corresponding to the auc of each feature
e.g. [0.33, 0.33]
"""
return mc.get_mpc_auc_alice(self._channel, labels, self._paillier)
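# A minimal usage sketch (assumptions: grpc is installed, a metrics server is
# listening on the placeholder address below, and labels follow the documented
# (sample_size, 1) shape of 0/1 values).
if __name__ == "__main__":
    import grpc
    client = FederatedFeatureEngineeringClient(key_len=1024)
    channel = grpc.insecure_channel("localhost:50051")  # placeholder address
    client.connect(channel)
    labels = [[1], [0], [1], [1]]
    print(client.get_woe(labels))
    print(client.get_iv(labels))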
|
StarcoderdataPython
|
3472700
|
<filename>preprocess/analyse_data.py
# -*- coding:utf-8 -*-
import pandas as pd
train = pd.read_csv('../data/kaggle/train.tsv', sep="\t")
print(train.head(5))
|
StarcoderdataPython
|