id (stringlengths 1–265) | text (stringlengths 6–5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
4834385 |
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, dyndep, workspace
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/prof:cuda_profile_ops")
class CudaProfileOpsTest(unittest.TestCase):
@unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU")
def test_run(self):
net = core.Net("net")
net.CudaProfileInitialize([], [], output="/tmp/cuda_profile_test")
net.CudaProfileStart([], [])
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
net.ConstantFill([], ["out"], shape=[1, 3, 244, 244])
net.CudaProfileStop([], [])
workspace.CreateNet(net)
workspace.RunNet(net)
| StarcoderdataPython |
3221372 | def get_divisors_count(number: int) -> int:
    # 1 and the number itself always count as divisors
    c = 2
    for i in range(2, number):
        if not (number % i):
            c += 1
    return c
def get_max_divisions_number(n: int, m: int) -> int:
    # largest divisor count found among the numbers n..m
    maximum = 0
    for i in range(n, m + 1):
        temp = get_divisors_count(i)
        if maximum < temp:
            maximum = temp
    return maximum
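# Added illustrative check (not part of the original snippet): 12 has the six
# divisors 1, 2, 3, 4, 6 and 12, which is also the largest divisor count of
# any number in the range 2..12.
if __name__ == "__main__":
    assert get_divisors_count(12) == 6
    assert get_max_divisions_number(2, 12) == 6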
| StarcoderdataPython |
3271200 | from collections import namedtuple
import math
import sys
from morton import Morton
try:
from shapely import geometry as shapely_geometry
except ImportError:
shapely_geometry = None
Point = namedtuple('Point', ['x', 'y'])
def is_point_on_line(point, start, end):
    # polygon edges may run in either x direction, so do not assume start.x <= end.x
    if min(start.x, end.x) <= point.x <= max(start.x, end.x):
        se = Point((end.x - start.x), (end.y - start.y))
        sp = Point((point.x - start.x), (point.y - start.y))
        return (sp.x*se.y - sp.y*se.x) == 0
    return False
def ray_intersects_segment(point, start, end):
# type: (Point, Point, Point) -> bool
'''
Check whether a horizontal ray cast from the point crosses the segment;
this is the building block of the ray-casting point-in-polygon algorithm
(see http://rosettacode.org/wiki/Ray-casting_algorithm)
point : the point from which the ray starts
start : the end-point of the segment with the smallest y coordinate
('start' must be "below" 'end')
end : the end-point of the segment with the greatest y coordinate
('end' must be "above" 'start')
'''
if start.y > end.y:
start, end = end, start
if point.y == start.y or point.y == end.y:
point = Point(point.x, point.y + 0.00001)
if point.y > end.y or point.y < start.y or point.x > max(start.x, end.x):
return False
if point.x < min(start.x, end.x):
return True
m_blue = (point.y - start.y) / float(point.x - start.x) \
if point.x != start.x else sys.float_info.max
m_red = (end.y - start.y) / float(end.x - start.x) \
if start.x != end.x else sys.float_info.max
return m_blue >= m_red
def point_in_geometry(point, geometry):
# type: (Point, list) -> bool
# point does not belong to polygon if located on some edge
for i in range(1, len(geometry)):
if is_point_on_line(point, geometry[i-1], geometry[i]):
return False
if is_point_on_line(point, geometry[0], geometry[-1]):
return False
contains = ray_intersects_segment(point, geometry[-1], geometry[0])
for i in range(1, len(geometry)):
if ray_intersects_segment(point, geometry[i - 1], geometry[i]):
contains = not contains
return contains
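# Added illustrative sketch (not part of the original module): the ray-casting
# helpers above classify a point against a polygon given as a list of Points.
# Points strictly inside are reported as contained, while points lying on an
# edge are deliberately excluded (see the comment in point_in_geometry).
def _point_in_geometry_demo():
    square = [Point(0, 0), Point(4, 0), Point(4, 4), Point(0, 4)]
    assert point_in_geometry(Point(2, 2), square)        # strictly inside
    assert not point_in_geometry(Point(2, 0), square)    # on an edge
    assert not point_in_geometry(Point(5, 2), square)    # outside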
class Hex(namedtuple('Hex', ['q', 'r'])):
@property
def s(self):
return -(self.q + self.r)
class FractionalHex(Hex):
def to_hex(self):
q = round(self.q)
r = round(self.r)
s = round(self.s)
dq = abs(q - self.q)
dr = abs(r - self.r)
ds = abs(s - self.s)
if(dq > dr and dq > ds):
q = -(r + s)
elif(dr > ds):
r = -(q + s)
return Hex(int(q), int(r))
_Orientation = namedtuple(
'Orientation',
['f', 'b', 'start_angle', 'sinuses', 'cosinuses'])
class Orientation(_Orientation):
def __new__(cls, f, b, start_angle, sinuses=None, cosinuses=None):
assert type(f) is list and len(f) == 4
assert type(b) is list and len(b) == 4
sinuses = sinuses or []
cosinuses = cosinuses or []
# prehash angles
if not sinuses:
for i in range(6):
angle = 2.0 * math.pi * (i + start_angle)/6.0
sinuses.append(math.sin(angle))
if not cosinuses:
for i in range(6):
angle = 2.0 * math.pi * (i + start_angle)/6.0
cosinuses.append(math.cos(angle))
return super(Orientation, cls).__new__(cls, f, b, start_angle, sinuses, cosinuses)
OrientationPointy = Orientation(
f=[math.sqrt(3.0), math.sqrt(3.0)/2.0, 0.0, 3.0/2.0],
b=[math.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0],
start_angle=0.5)
OrientationFlat = Orientation(
f=[3.0/2.0, 0.0, math.sqrt(3.0)/2.0, math.sqrt(3.0)],
b=[2.0/3.0, 0.0, -1.0/3.0, math.sqrt(3.0)/3.0],
start_angle=0.0)
_Grid = namedtuple('Grid', ['orientation', 'origin', 'size', 'morton'])
class Grid(_Grid):
def __new__(cls, orientation, origin, size, morton=None):
# type: (Orientation, Point, Point, Morton) -> Grid
morton = morton or Morton()
return super(Grid, cls).__new__(cls, orientation, origin, size, morton)
def hex_to_code(self, hex):
# type: (Hex) -> int
return self.morton.spack(hex.q, hex.r)
def hex_from_code(self, code):
# type: (int) -> Hex
return Hex(*self.morton.sunpack(code))
def hex_at(self, point):
# type: (Point) -> Hex
x = (point.x - self.origin.x) / float(self.size.x)
y = (point.y - self.origin.y) / float(self.size.y)
q = self.orientation.b[0]*x + self.orientation.b[1] * y
r = self.orientation.b[2]*x + self.orientation.b[3] * y
return FractionalHex(q, r).to_hex()
def hex_center(self, hex):
# type: (Hex) -> Point
f = self.orientation.f
x = (f[0] * hex.q + f[1]*hex.r)*self.size.x + self.origin.x
y = (f[2] * hex.q + f[3]*hex.r)*self.size.y + self.origin.y
return Point(x, y)
def hex_corners(self, hex):
# type: (Hex) -> list
corners = []
center = self.hex_center(hex)
for i in range(6):
x = self.size.x*self.orientation.cosinuses[i] + center.x
y = self.size.y*self.orientation.sinuses[i] + center.y
corners.append(Point(x, y))
return corners
def hex_neighbors(self, hex, layers):
# type: (Hex, int) -> list
neighbors = []
for q in range(-layers, layers+1):
r1 = max(-layers, -q-layers)
r2 = min(layers, -q+layers)
for r in range(r1, r2+1):
if q == 0 and r == 0:
continue
neighbors.append(Hex(q+hex.q, r+hex.r))
return neighbors
def _make_region(self, geometry):
# type: (list) -> Region
x, y = zip(*geometry)
hexes = [
self.hex_at(Point(min(x), min(y))),
self.hex_at(Point(min(x), max(y))),
self.hex_at(Point(max(x), max(y))),
self.hex_at(Point(max(x), min(y))),
]
q, r = zip(*hexes)
q_min = min(q) - 1
q_max = max(q) + 1
r_min = min(r) - 1
r_max = max(r) + 1
def any_point_in_geometry(ps, g):
for p in ps:
if point_in_geometry(p, g):
return True
hexes = []
lookup = {}
for q in range(q_min, q_max+1):
for r in range(r_min, r_max+1):
hex = Hex(q, r)
corners = self.hex_corners(hex)
if(
any_point_in_geometry(corners, geometry) or
any_point_in_geometry(geometry, corners)
):
hexes.append(hex)
lookup[self.hex_to_code(hex)] = True
return Region(self, hexes, lookup)
def _shapely_make_region(self, geometry):
# type: (list) -> Region
polygon = shapely_geometry.Polygon(geometry)
(minx, miny, maxx, maxy) = polygon.bounds
x, y = zip(*geometry)
hexes = [
self.hex_at(Point(minx, miny)),
self.hex_at(Point(minx, maxy)),
self.hex_at(Point(maxx, maxy)),
self.hex_at(Point(maxx, miny)),
]
q, r = zip(*hexes)
q_min = min(q) - 1
q_max = max(q) + 1
r_min = min(r) - 1
r_max = max(r) + 1
hexes = []
lookup = {}
for q in range(q_min, q_max+1):
for r in range(r_min, r_max+1):
hex = Hex(q, r)
corners = self.hex_corners(hex)
if polygon.intersects(shapely_geometry.Polygon(corners)):
hexes.append(hex)
lookup[self.hex_to_code(hex)] = True
return Region(self, hexes, lookup)
if shapely_geometry:
make_region = _shapely_make_region
else:
make_region = _make_region
class Region(namedtuple('Region', ['grid', 'hexes', 'lookup'])):
def union(self, region):
# type: (Region) -> Region
if self.grid is not region.grid:
raise ValueError("grid is different")
hexes = list(self.hexes)
lookup = dict(self.lookup)
for hex in region.hexes:
if self.contains(hex):
continue
hexes.append(hex)
            lookup[self.grid.hex_to_code(hex)] = True
        return Region(self.grid, hexes, lookup)
def contains(self, hex):
# type: (Hex) -> bool
return self.grid.hex_to_code(hex) in self.lookup
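# Added illustrative sketch (not part of the original module): a Grid maps
# between pixel-space Points and axial hex coordinates through the
# orientation's forward (f) and inverse (b) matrices; the origin and size
# values below are made-up example numbers.
if __name__ == '__main__':
    _point_in_geometry_demo()
    grid = Grid(OrientationFlat, origin=Point(0, 0), size=Point(10, 10))
    h = grid.hex_at(Point(35, 17))        # pixel point -> hex coordinate
    print(h, grid.hex_center(h))          # hex coordinate -> pixel center
    square = [Point(0, 0), Point(100, 0), Point(100, 100), Point(0, 100)]
    region = grid.make_region(square)     # hexes overlapping the square
    print(len(region.hexes), region.contains(h))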
| StarcoderdataPython |
1705482 | # -*- coding: utf-8 -*-
c = 0
t = 0
p = input("enter a string to see if it is a palindrome ")
Pa = list(p)
count = len(Pa) - 1
while c <= count:
if Pa[c] == Pa[count - c]:
t = t + 1
c = c + 1
else:
print(p, "is not a palindrome")
c = count + 12
if t == count + 1:
print(p, "is a palindrome")
| StarcoderdataPython |
3334928 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 14:57
from __future__ import unicode_literals
import cms.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('title', models.CharField(max_length=255, verbose_name='Tytuł')),
('menu_id', models.CharField(max_length=255, verbose_name='Menu ID')),
('url', models.URLField(null=True)),
('target', models.CharField(blank=True, choices=[('_blank', 'Open in new window'), ('_self', 'Open in same window'), ('_parent', 'Delegate to parent'), ('_top', 'Delegate to top')], max_length=255, verbose_name='Target')),
('page', cms.models.fields.PageField(null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Page', verbose_name='Strona')),
],
options={
'abstract': False,
},
),
]
| StarcoderdataPython |
46778 | <filename>02-Coding-skills-swagger/swagger_server/test/test_default_controller.py
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.aws_info import AwsInfo # noqa: E501
from swagger_server.test import BaseTestCase
class TestDefaultController(BaseTestCase):
"""DefaultController integration test stubs"""
def test_get_ec2_instances(self):
"""Test case for get_ec2_instances
List AWS EC2 instances
"""
        aws_info = AwsInfo()
        response = self.client.open(
            '/ec2',
            method='POST',
            data=json.dumps(aws_info),
            content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_healthz_get(self):
"""Test case for healthz_get
Health Check
"""
response = self.client.open(
'/healthz',
method='GET')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| StarcoderdataPython |
77871 | from peewee import *
from .base import BaseModel
from . import climate
from . import hru as hru_db
from . import routing_unit
from . import aquifer
from . import channel
from . import reservoir
from . import exco
from . import dr
from . import simulation
from . import recall
class Con(BaseModel):
"""Inheritable base class for all connect files."""
name = CharField(unique=True)
gis_id = IntegerField(null=True)
area = DoubleField()
lat = DoubleField()
lon = DoubleField()
elev = DoubleField(null=True)
wst = ForeignKeyField(climate.Weather_sta_cli, null=True, on_delete='SET NULL')
cst = ForeignKeyField(simulation.Constituents_cs, null=True, on_delete='SET NULL')
ovfl = IntegerField() # ??? Pointer to the connections of spatial objects for overbank flooding
rule = IntegerField() # ??? Pointer to ruleset for flow fraction of hydrograph
class Con_out(BaseModel):
"""Inheritable base class for all outflow parameters in many of the connect files."""
order = IntegerField()
obj_typ = CharField()
obj_id = IntegerField()
hyd_typ = CharField()
frac = DoubleField()
class Hru_con(Con):
hru = ForeignKeyField(hru_db.Hru_data_hru, null=True, on_delete='SET NULL')
class Hru_con_out(Con_out):
hru_con = ForeignKeyField(Hru_con, on_delete='CASCADE', related_name='con_outs')
class Hru_lte_con(Con):
lhru = ForeignKeyField(hru_db.Hru_lte_hru, null=True, on_delete='SET NULL')
class Hru_lte_con_out(Con_out):
hru_lte_con = ForeignKeyField(Hru_lte_con, on_delete='CASCADE', related_name='con_outs')
class Rout_unit_con(Con):
rtu = ForeignKeyField(routing_unit.Rout_unit_rtu, null=True, on_delete='SET NULL')
class Rout_unit_con_out(Con_out):
rtu_con = ForeignKeyField(Rout_unit_con, on_delete ='CASCADE', related_name='con_outs')
class Modflow_con(Con):
mfl = IntegerField() # Should be FK to something, but no modflow object yet that I can find.
class Modflow_con_out(Con_out):
modflow_con = ForeignKeyField(Modflow_con, on_delete='CASCADE', related_name='con_outs')
class Aquifer_con(Con):
aqu = ForeignKeyField(aquifer.Aquifer_aqu, null=True, on_delete='SET NULL')
class Aquifer_con_out(Con_out):
aquifer_con = ForeignKeyField(Aquifer_con, on_delete='CASCADE', related_name='con_outs')
class Aquifer2d_con(Con):
aqu2d = ForeignKeyField(aquifer.Aquifer_aqu, null=True, on_delete='SET NULL') # Some doubt in documentation about this link
class Aquifer2d_con_out(Con_out):
aquifer2d_con = ForeignKeyField(Aquifer2d_con, on_delete='CASCADE', related_name='con_outs')
class Channel_con(Con):
cha = ForeignKeyField(channel.Channel_cha, null=True, on_delete='SET NULL')
class Channel_con_out(Con_out):
channel_con = ForeignKeyField(Channel_con, on_delete='CASCADE', related_name='con_outs')
class Reservoir_con(Con):
res = ForeignKeyField(reservoir.Reservoir_res, null=True, on_delete='SET NULL')
class Reservoir_con_out(Con_out):
reservoir_con = ForeignKeyField(Reservoir_con, on_delete='CASCADE', related_name='con_outs')
class Recall_con(Con):
rec = ForeignKeyField(recall.Recall_rec, null=True, on_delete='SET NULL')
class Recall_con_out(Con_out):
recall_con = ForeignKeyField(Recall_con, on_delete='CASCADE', related_name='con_outs')
class Exco_con(Con):
exco = ForeignKeyField(exco.Exco_exc, null=True, on_delete='SET NULL')
class Exco_con_out(Con_out):
exco_con = ForeignKeyField(Exco_con, on_delete='CASCADE', related_name='con_outs')
class Delratio_con(Con):
dlr = ForeignKeyField(dr.Delratio_del, null=True, on_delete='SET NULL')
class Delratio_con_out(Con_out):
delratio_con = ForeignKeyField(Delratio_con, on_delete='CASCADE', related_name='con_outs')
class Outlet_con(Con):
out = IntegerField() # Should be FK to something, but no outlet object yet that I can find.
class Outlet_con_out(Con_out):
outlet_con = ForeignKeyField(Outlet_con, on_delete='CASCADE', related_name='con_outs')
class Chandeg_con(Con):
lcha = ForeignKeyField(channel.Channel_lte_cha, null=True, on_delete='SET NULL') # Think this is connect for channel-lte, but not positive.
class Chandeg_con_out(Con_out):
chandeg_con = ForeignKeyField(Chandeg_con, on_delete='CASCADE', related_name='con_outs')
# Though organized in the routing unit section, this needs to be here due to circular referencing of rout_unit_con and rout_unit_rtu
class Rout_unit_ele(BaseModel):
name = CharField(unique=True)
rtu = ForeignKeyField(Rout_unit_con, null=True, related_name='elements', on_delete='SET NULL')
obj_typ = CharField()
obj_id = IntegerField()
frac = DoubleField()
dlr = ForeignKeyField(dr.Delratio_del, null=True, on_delete='SET NULL')
| StarcoderdataPython |
4831712 | <reponame>eragasa/pypospack<gh_stars>1-10
import numpy as np
from pypospack.potential import func_generalized_lj_w_cutoff
b1 =
b2
r1
V0
delta
| StarcoderdataPython |
1644230 | <gh_stars>0
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "<NAME> <<EMAIL>>"
from iotronic_lightningrod.proxies import Proxy
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
import json
import os
import site
import subprocess
import time
from iotronic_lightningrod.common.exception import NginxError
from iotronic_lightningrod.modules import utils
import iotronic_lightningrod.wampmessage as WM
from subprocess import call
class ProxyManager(Proxy.Proxy):
def __init__(self):
super(ProxyManager, self).__init__("nginx")
def finalize(self):
"""Function called at the end of module loading (after RPC registration).
:return:
"""
pass
def _proxyInfo(self):
nginxMsg = {}
try:
stat = subprocess.Popen('systemctl status nginx.service',
shell=True, stdout=subprocess.PIPE)
stdout_list = str(stat.communicate()[0]).split('\n')
for line in stdout_list:
if 'Active:' in line:
nginxMsg['log'] = line.split('\\n')[2].replace(" ", "")
if '(running)' in line:
nginxMsg['status'] = True
else:
nginxMsg['status'] = False
nginxMsg = json.dumps(nginxMsg)
return nginxMsg
except Exception as err:
LOG.error("Error check NGINX status: " + str(err))
nginxMsg['log'] = str(err)
nginxMsg['status'] = False
nginxMsg = json.dumps(nginxMsg)
return nginxMsg
def _proxyStatus(self):
nginxMsg = {}
try:
stat = subprocess.Popen(
'systemctl status nginx.service',
shell=True,
stdout=subprocess.PIPE
)
stdout_list = str(stat.communicate()[0]).split('\n')
for line in stdout_list:
if 'Active:' in line:
if '(running)' in line:
nginxMsg['log'] = "NGINX is running"
nginxMsg['status'] = True
# LOG.info("--> " + nginxMsg['log'])
else:
nginxMsg['log'] = "NGINX is not running"
nginxMsg['status'] = False
# LOG.warning("--> " + nginxMsg['log'])
except Exception as err:
nginxMsg['log'] = "Error check NGINX status: " + str(err)
            nginxMsg['status'] = False
# LOG.error("--> " + nginxMsg['log'])
return json.dumps(nginxMsg)
def _proxyReload(self):
nginxMsg = {}
try:
stat = subprocess.call('service nginx reload', shell=True)
if stat != 0:
raise NginxError(str(stat))
else:
nginxMsg['log'] = "NGINX successfully reloaded"
nginxMsg['code'] = stat
LOG.info("--> " + nginxMsg['log'])
except NginxError:
nginxMsg['log'] = "NGINX reloading error"
nginxMsg['code'] = stat
LOG.warning("--> " + nginxMsg['log'])
except Exception as err:
nginxMsg['log'] = "NGINX Generic error: " + str(err)
nginxMsg['code'] = stat
LOG.warning("--> " + nginxMsg['log'])
nginxMsg = json.dumps(nginxMsg)
return nginxMsg
def _proxyRestart(self):
nginxMsg = {}
try:
stat = os.system('systemctl restart nginx')
if stat != 0:
raise NginxError(str(stat))
else:
nginxMsg['log'] = "NGINX successfully restart"
nginxMsg['code'] = stat
LOG.info("--> " + nginxMsg['log'])
except NginxError:
nginxMsg['log'] = "NGINX restarting error"
nginxMsg['code'] = stat
LOG.warning("--> " + nginxMsg['log'])
except Exception as err:
nginxMsg['log'] = "NGINX generic error: " + str(err)
nginxMsg['code'] = stat
LOG.warning("--> " + nginxMsg['log'])
return json.dumps(nginxMsg)
def nginx_conf_verify(self, fp):
with open(fp, "r") as text_file:
LOG.debug(text_file.read())
def _proxyEnableWebService(self, board_dns, owner_email):
nginxMsg = {}
try:
nginx_path = "/etc/nginx/conf.d/"
nginx_board_conf_file = nginx_path + "/" + board_dns + ".conf"
nginx_board_conf = '''server {{
listen 80;
server_name {0};
}}
'''.format(board_dns)
with open(nginx_board_conf_file, "w") as text_file:
text_file.write("%s" % nginx_board_conf)
self.nginx_conf_verify(nginx_board_conf_file)
time.sleep(3)
self._proxyReload()
time.sleep(3)
command = "/usr/bin/certbot -n " \
"--redirect " \
"--authenticator webroot " \
"--installer nginx " \
"-w /var/www/html/ " \
"--domain " + board_dns + " " \
"--agree-tos " \
"--email " + owner_email
LOG.debug(command)
certbot_result = call(command, shell=True)
LOG.info("CERTBOT RESULT: " + str(certbot_result))
nginxMsg['result'] = "SUCCESS"
nginxMsg['message'] = "Webservice module enabled."
LOG.info("--> " + nginxMsg['message'])
except Exception as err:
nginxMsg['log'] = "NGINX DNS setup error: " + str(err)
nginxMsg['code'] = ""
LOG.warning("--> " + nginxMsg['log'])
return json.dumps(nginxMsg)
def _exposeWebservice(self, board_dns, service_dns, local_port, dns_list):
nginxMsg = {}
try:
nginx_path = "/etc/nginx/conf.d"
service_path = nginx_path + "/" + service_dns + ".conf"
string = '''server {{
listen 80;
server_name {0};
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
location / {{
proxy_pass http://localhost:{1};
}}
}}
'''.format(service_dns, local_port)
with open(service_path, "w") as ws_nginx_conf:
ws_nginx_conf.write("%s" % string)
time.sleep(3)
self.nginx_conf_verify(service_path)
self._proxyReload()
time.sleep(3)
command = "/usr/bin/certbot " \
"--expand -n " \
"--redirect " \
"--authenticator webroot " \
"--installer nginx -w /var/www/html/ " \
"--domain " + str(dns_list)
command = "/usr/bin/certbot " \
"-n " \
"--redirect " \
"--authenticator webroot " \
"--installer nginx -w /var/www/html/ " \
"--cert-name " + str(board_dns) + " " \
"--domain " + str(dns_list)
LOG.debug(command)
certbot_result = call(command, shell=True)
LOG.info("CERTBOT RESULT: " + str(certbot_result))
LOG.info("Webservices list updated:\n" +
str(self._webserviceList()))
nginxMsg['result'] = "SUCCESS"
nginxMsg['message'] = "Webservice '" + service_dns + \
"' exposed in NGINX."
LOG.info(nginxMsg['message'])
except Exception as e:
nginxMsg['message'] = "Error exposing Webservice '" + \
service_dns + \
"' configuration in NGINX: {}".format(e)
nginxMsg['result'] = "ERROR"
LOG.warning("--> " + nginxMsg['message'])
return json.dumps(nginxMsg)
def _disableWebservice(self, service_dns, dns_list):
"""
:param service:
:param dns_list:
:return:
"""
nginxMsg = {}
try:
nginx_path = "/etc/nginx/conf.d"
service_path = nginx_path + "/" + service_dns + ".conf"
if os.path.exists(service_path):
os.remove(service_path)
time.sleep(1)
self._proxyReload()
time.sleep(3)
command = "/usr/bin/certbot " \
"--expand -n " \
"--redirect " \
"--authenticator webroot " \
"--installer nginx -w /var/www/html/ " \
"--domain " + str(dns_list)
LOG.debug(command)
certbot_result = call(command, shell=True)
LOG.info("CERTBOT RESULT: " + str(certbot_result))
LOG.info("Webservices list updated:\n" + str(
self._webserviceList()))
nginxMsg['message'] = "webservice '" \
+ service_dns + "' disabled."
nginxMsg['result'] = "SUCCESS"
LOG.info(nginxMsg['message'])
else:
nginxMsg['message'] = "webservice file " \
+ service_path + " does not exist"
nginxMsg['result'] = "ERROR"
except Exception as e:
nginxMsg['message'] = "Error disabling Webservice '" + \
service_dns + "': {}".format(e)
nginxMsg['result'] = "ERROR"
return json.dumps(nginxMsg)
def _webserviceList(self):
nginx_path = "/etc/nginx/conf.d/"
if os.path.exists(nginx_path):
service_list = [f for f in os.listdir(nginx_path)
if os.path.isfile(os.path.join(nginx_path, f))]
else:
service_list = []
return service_list
async def NginxInfo(self):
rpc_name = utils.getFuncName()
LOG.info("RPC " + rpc_name + " CALLED")
message = self._proxyInfo()
w_msg = WM.WampSuccess(message)
return w_msg.serialize()
async def NginxStatus(self):
rpc_name = utils.getFuncName()
LOG.info("RPC " + rpc_name + " CALLED")
message = self._proxyStatus()
w_msg = WM.WampSuccess(message)
return w_msg.serialize()
async def NginxReload(self):
rpc_name = utils.getFuncName()
LOG.info("RPC " + rpc_name + " CALLED")
message = self._proxyReload()
w_msg = WM.WampSuccess(message)
return w_msg.serialize()
async def NginxRestart(self):
rpc_name = utils.getFuncName()
LOG.info("RPC " + rpc_name + " CALLED")
message = self._proxyRestart()
w_msg = WM.WampSuccess(message)
return w_msg.serialize()
async def NginxIotronicConf(self):
rpc_name = utils.getFuncName()
LOG.info("RPC " + rpc_name + " CALLED")
message = self._proxyIotronicConf()
w_msg = WM.WampSuccess(message)
return w_msg.serialize()
| StarcoderdataPython |
1618227 | <filename>scripts/relax_to_json.py
#!/usr/bin/env python
import os
import sys
import json
import pychemia
filename = None
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
print('Enter the JSON filename to store the data')
exit(1)
dirs = [x for x in os.listdir('.') if os.path.isdir(x)]
ret = []
for idir in dirs:
if os.path.isfile(idir + '/POSCAR'):
try:
st = pychemia.code.vasp.read_poscar(idir + '/POSCAR')
except ValueError:
print('Bad POSCAR\n\n' + open(idir + '/POSCAR').read())
continue
# shutil.copy2(idir+'/POSCAR',idir+'_POSCAR')
print(idir, st.natom)
else:
st = pychemia.Structure.load_json(idir + '/structure.json')
# shutil.copy2(idir+'/structure.json',idir+'_structure.json')
print('ERROR:', idir, st.natom)
continue
if os.path.isfile(idir + '/OUTCAR'):
try:
vo = pychemia.code.vasp.VaspOutput(idir + '/OUTCAR')
except ValueError:
print('Error reading Vasp Output @ ' + idir + '/OUTCAR')
continue
if not vo.has_forces_stress_energy():
print('Error extracting forces @ ' + idir + '/OUTCAR')
continue
else:
print('No OUTCAR found @ ' + idir)
continue
spacegroup = pychemia.crystal.CrystalSymmetry(st).number()
energy_pa = vo.final_data['energy']['free_energy'] / st.natom
data = {'id': idir, 'energy_pa': energy_pa, 'natom': st.natom, 'spacegroup': spacegroup,
'forces': vo.relaxation_info()['avg_force'],
'stress': max(vo.relaxation_info()['avg_stress_diag'], vo.relaxation_info()['avg_stress_non_diag'])}
ret.append(data)
wf = open(filename, 'w')
json.dump(ret, wf, sort_keys=True, indent=4, separators=(',', ': '))
wf.close()
| StarcoderdataPython |
1783246 | <reponame>benburk/shifty<filename>run_examples.py
"""run examples from the examples folder"""
from examples import calculator
def main():
"""run calculator"""
calculator.main()
if __name__ == "__main__":
main()
| StarcoderdataPython |
4836029 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import page_cycler
import page_sets
from telemetry import test
class PageCyclerBloat(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.BloatPageSet
options = {'pageset_repeat': 10}
class PageCyclerDhtml(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.DhtmlPageSet
options = {'pageset_repeat': 10}
class PageCyclerIntlArFaHe(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.IntlArFaHePageSet
options = {'pageset_repeat': 10}
class PageCyclerIntlEsFrPtBr(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.IntlEsFrPtBrPageSet
options = {'pageset_repeat': 10}
class PageCyclerIntlHiRu(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.IntlHiRuPageSet
options = {'pageset_repeat': 10}
@test.Disabled('android', 'win') # crbug.com/379564, crbug.com/330909
class PageCyclerIntlJaZh(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.IntlJaZhPageSet
options = {'pageset_repeat': 10}
class PageCyclerIntlKoThVi(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.IntlKoThViPageSet
options = {'pageset_repeat': 10}
class PageCyclerMorejs(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.MorejsPageSet
options = {'pageset_repeat': 10}
class PageCyclerMoz(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.MozPageSet
options = {'pageset_repeat': 10}
@test.Disabled('linux', 'win') # crbug.com/353260
class PageCyclerNetsimTop10(test.Test):
"""Measures load time of the top 10 sites under simulated cable network."""
tag = 'netsim'
test = page_cycler.PageCycler
page_set = page_sets.Top10PageSet
options = {
'cold_load_percent': 100,
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': 5,
}
def __init__(self):
super(PageCyclerNetsimTop10, self).__init__()
# TODO: This isn't quite right.
# This option will still apply to page cyclers that run after this one.
self.test.clear_cache_before_each_run = True
class PageCyclerTop10Mobile(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.Top10MobilePageSet
options = {'pageset_repeat': 10}
class PageCyclerKeyMobileSites(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.KeyMobileSitesPageSet
options = {'pageset_repeat': 10}
@test.Disabled('android') # crbug.com/357326
class PageCyclerToughLayoutCases(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.ToughLayoutCasesPageSet
options = {'pageset_repeat': 10}
# crbug.com/273986: This test is really flakey on xp.
# cabug.com/341843: This test is always timing out on Android.
@test.Disabled('android', 'win')
class PageCyclerTypical25(test.Test):
test = page_cycler.PageCycler
page_set = page_sets.Typical25PageSet
options = {'pageset_repeat': 10}
| StarcoderdataPython |
1696920 | import numpy as np
import tools
import warnings
class Alpha():
"""
Docstring for ALPHA.
Alpha is the an influence coefficient matrix
Influence coefficient matrix is a representation of the change of vibration
vector in a measuring point when putting a unit weight on a balancing plane.
"""
def __init__(self:'Influence matrix', name:'string'=''):
"""
Instantiate an instance of Alpha
name: optional name of Alpha
"""
self.name = name
    def add(self, direct_matrix:'np.array'=None, A:'initial_vibration numpy.array'=None,
B:'trial matrix numpy.array'=None, U:'trial weight row vector numpy.array'=None,
keep_trial:'optional keep the previous trial weight in every succeeding trial'=False,
name:'string'=''):
'''
Method to add new values for Alpha instance
either the direct_matrix is needed or ALL of (A, B, U)
Args:
direct_matrix: numpy array M rows -> measuring points,
N columns -> balancing planes
A: Initial vibration column array -> numpy array
B: Trial matrix MxN array -> numpy array
U: Trial weights row array -> numpy array
alpha = (B - A) / U
'''
try: # test if direct input
_ = direct_matrix.shape # TODO raise error when matrix is 1 dim
if direct_matrix.shape[0] >= direct_matrix.shape[1]:
self.value = direct_matrix
else:
raise tools.CustomError('Number of rows(measuring points) should be '
'equal or more than the number of columns '
'(balancing planes)!')
except AttributeError:
# if direct matrix is not input calculate it from A, B, U
# test the exstiance of A, A0, B, U to calculate ALPHA
try:
all([A.shape, B.shape, U.shape])
# Test dimensions
if A.shape[1] > 1:
raise tools.CustomError('`A` should be column vector')
elif U.ndim > 1:
raise tools.CustomError('`U` should be row vector')
elif B.shape[0] != A.shape[0] or B.shape[1] != U.shape[0]:
raise tools.CustomError('`B` dimensions should match `A`and `U`')
else:
if not keep_trial:
self.value = (B - A) / U
else:
_A_keep_trial = np.delete((np.insert(B, [0], A, axis=1)),
-1, axis=1)
self.value = (B - _A_keep_trial) / U
except AttributeError:
raise tools.CustomError('Either direct_matrix or (A,B,U) '
'should be passed "numpy arrays"')
def check(self, ill_condition_remove=False):
'''
Method to check the alpha value
* check the symmetrical of the matrix (check for square matrix only,
for square matrix it should be symmetric obeyin the reciprocity law)
* check for ill conditioned planes:
if for any reason two or more planes has independent readings
for example [[1, 2 , 3], [2, 4, 6]] this is named as ill-conditioned planes
as they does not carry new information from the system and considering them
cause solution infliteration.
ill_condition_remove = True : remove the ill_condition planes after the check
'''
self.M = self.value.shape[0]
self.N = self.value.shape[1]
if self.M == self.N:
_check_sym = np.allclose(self.value, self.value.T, 0.1, 1e-06)
if not _check_sym:
warnings.warn('Warning: Influence Matrix is asymmetrical!')
_check_status_sym = 'Influence Matrix is asymmetrical, check your data'
else:
_check_status_sym = 'Influence Matrix is symmetric --> OK'
else:
_check_status_sym = 'Not a square matrix --> no exact solution'
# Checking ILL-CONDITIONED planes
ill_plane = tools.ill_condition(self.value)
if ill_plane:
            _check_ill_condition = 'Ill condition found in plane {}'.format(ill_plane)
if ill_condition_remove:
self.value = np.delete(self.value,[ill_plane], axis=1)
else:
_check_ill_condition ='No ill conditioned planes --> ok'
return print('{}\n\n{}'.format(_check_status_sym, _check_ill_condition))
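# Added illustrative sketch (not part of the original module), using made-up
# numbers: A holds the initial vibration readings, B the readings taken after
# each trial weight and U the trial weights, so Alpha.add() computes the
# influence matrix as (B - A) / U.
if __name__ == '__main__':
    A = np.array([[10 + 0j], [8 + 0j]])         # initial vibration readings (column)
    U = np.array([2 + 0j, 2 + 0j])              # trial weights (row)
    B = np.array([[20 + 4j, 12 + 2j],
                  [10 + 2j, 16 + 6j]])          # readings after each trial run
    alpha = Alpha('example')
    alpha.add(A=A, B=B, U=U)
    alpha.check()                               # symmetry / ill-condition report
    print(alpha.value)                          # the influence matrix (B - A) / U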
| StarcoderdataPython |
4818370 | <reponame>sdpython/pymyinstall
"""
@brief test log(time=140s)
skip this test for regular run
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pymyinstall.installcustom import install_pandoc
class TestLONGPandoc (unittest.TestCase):
def test_install_pandoc(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.abspath(os.path.split(__file__)[0])
if sys.platform.startswith("win"):
temp = os.path.join(fold, "temp_pandoc")
if not os.path.exists(temp):
os.mkdir(temp)
for _ in os.listdir(temp):
if ".msi" in _:
os.remove(os.path.join(temp, _))
r = install_pandoc(
temp_folder=temp,
fLOG=fLOG,
install=False,
force_download=True)
self.assertTrue(os.path.exists(r))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
161325 | <gh_stars>0
import operator
def sortIncreasing(results):
    results.sort(key = operator.itemgetter(1))
    return results
def sortDecreasing(results):
results.sort(key = operator.itemgetter(1), reverse = True)
return results
def sortIncreasingDiscount(results):
    results.sort(key = operator.itemgetter(1))
    return results
| StarcoderdataPython |
3254883 | #!/usr/bin/env python
# * coding: utf8 *
'''
config.py
A module that contains the static names and places of feature classes
'''
feature_classes = {
#: Input dataset names, set these (must all exist in the workspace gdb)
#: Current Broadband Service layer
'bb_service': 'Utilities_BroadbandService_20201105',
#: Current Address Points layer
'address_points': 'AddressPoints_20200923',
#: County Boundaries
'counties': 'county_boundaries',
#: Municipal Boundaries
'municip': 'municipal_boundaries',
#: Approximate unincorporated areas
'unincorp': 'unincorporated_boundaries',
#: Output dataset names
'address_service_final': 'Address_Service_Final_20201105no_syringa',
'msba': 'MSBA_20201105no_syringa',
'address_count_area': 'AddressCount_AreaName_20201105no_syringa',
'address_count_type': 'AddressCount_AreaType_20201105no_syringa',
'address_count_county': 'AddressCount_County_20201105no_syringa'
}
| StarcoderdataPython |
4815667 | <reponame>mechkro/TestGrounds<filename>test3.py
import tkinter as tk
class newwin3(object):
""" """
def __init__(self, master):
self.master = master
self.emptycurrent()
self.addnew()
#--------------------------------------
def emptycurrent(self):
""" """
for i in self.master.winfo_children():
i.destroy()
return
#--------------------------------------
def addnew(self):
""" """
self.f = tk.Frame(self.master, bg = 'green')
self.f.grid(padx = 2, pady = 2)
self.l = tk.Button(self.f, text = 'Win 3',
command = lambda: self.newwin())
self.l.grid(padx = 10, pady = 10)
#--------------------------------------
def newwin(self):
""" """
self.ll = tk.Label(self.f, text = 'Tada',
bg = 'green')
self.ll.grid(row = 1, padx = 5, pady = 5)
self.b2 = tk.Button(self.f, text = 'back',
command = lambda: self.backwin())
self.b2.grid(row = 2, padx = 5, pady = 5)
#--------------------------------------
def backwin(self):
""" """
from oldwin import Recalled
self.bw = Recalled(self.master)
| StarcoderdataPython |
3298164 | <reponame>fernandolguevara/crosswalk-app-tools
#!/usr/bin/env python
import os
import sys
import commands
import shutil
from optparse import OptionParser
import urllib2
import re
from bs4 import BeautifulSoup
import platform
os.system("node -v")
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
crosswalk_test_suite = os.path.join(SCRIPT_PATH, "crosswalk-test-suite")
tmp = os.path.join(SCRIPT_PATH, "tmp")
apptools = os.path.join(crosswalk_test_suite, "apptools")
apptools_android_tests = os.path.join(tmp, "apptools-android-tests")
apptools_windows_tests = os.path.join(tmp, "apptools-windows-tests")
apptools_ios_tests = os.path.join(tmp, "apptools-ios-tests")
os.environ['CROSSWALK_APP_SRC'] = os.path.join(SCRIPT_PATH, "src") + "/"
returnCode = 0
if os.path.exists(crosswalk_test_suite):
os.chdir(crosswalk_test_suite)
cmd = 'git pull'
returnCode = os.system(cmd)
os.chdir(SCRIPT_PATH)
else:
cmd = 'git clone https://github.com/crosswalk-project/crosswalk-test-suite'
returnCode = os.system(cmd)
if returnCode == 1:
sys.exit(1)
if os.path.exists(tmp):
shutil.rmtree(tmp)
def crosswalk_version(channel, platform):
htmlDoc = urllib2.urlopen(
'https://download.01.org/crosswalk/releases/crosswalk/' + platform + '/' +
channel +
'/').read()
soup = BeautifulSoup(htmlDoc)
alist = soup.find_all('a')
version = ''
for index in range(-1, -len(alist)-1, -1):
aEle = alist[index]
version = aEle['href'].strip('/')
if re.search('[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*', version):
break
return version
def main():
usage = "Usage: ./lint.py -p android"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-p",
dest="platform",
help="specify the testsuit platform, e.g. android, windows, ios")
opts_parser.add_option(
"-a",
dest="arch",
help="specify the packing apk bit, e.g. 32bit, 64bit")
global BUILD_PARAMETERS
(BUILD_PARAMETERS, args) = opts_parser.parse_args()
if BUILD_PARAMETERS.platform == "android":
os.environ['CROSSWALK_APP_TOOLS_CACHE_DIR'] = os.path.join(apptools_android_tests, "tools")
x = []
for i in list(os.popen('adb devices -l'))[1:]:
if i.strip(os.linesep) != "" and i.strip(os.linesep).split(" ")[0] != "*":
x.append(i.strip(os.linesep).split(" ")[0])
if x:
os.environ['DEVICE_ID'] = ",".join(x)
os.environ['SKIP_EMULATOR'] = "True"
android_crosswalk_version = crosswalk_version("stable", BUILD_PARAMETERS.platform)
shutil.copytree(os.path.join(apptools, "apptools-android-tests"), apptools_android_tests)
fp = open(apptools_android_tests + '/arch.txt', 'w+')
fp.write("arm")
fp.close()
if platform.system() != "Linux":
hp = open(apptools_android_tests + "/host.txt", 'w+')
hp.write("Windows")
hp.close()
else:
hp = open(apptools_android_tests + "/host.txt", 'w+')
hp.write("Android")
hp.close()
if BUILD_PARAMETERS.arch == "64bit":
vp_64 = open(apptools_android_tests + "/version.txt", 'w+')
vp_64.write(android_crosswalk_version + " 64")
vp_64.close()
os.chdir(os.path.join(apptools_android_tests, "tools"))
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/stable/" + android_crosswalk_version + "/crosswalk-" + android_crosswalk_version + "-64bit.zip").read()
with open("crosswalk-" + android_crosswalk_version + "-64bit.zip", 'wb') as f:
f.write(data)
else:
vp_32 = open(apptools_android_tests + "/version.txt", 'w+')
vp_32.write(android_crosswalk_version + " 32")
vp_32.close()
os.chdir(os.path.join(apptools_android_tests, "tools"))
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/stable/" + android_crosswalk_version + "/crosswalk-" + android_crosswalk_version + ".zip").read()
with open("crosswalk-" + android_crosswalk_version + ".zip", 'wb') as f:
f.write(data)
os.chdir(os.path.join(os.path.join(apptools_android_tests, "apptools"), "CI"))
if platform.system() != "Linux":
retval = os.system("python -m unittest discover --pattern=crosswalk_pkg_basic.py > null")
else:
retval = os.system("python -m unittest discover --pattern=*.py")
elif BUILD_PARAMETERS.platform == "windows":
os.environ['CROSSWALK_APP_TOOLS_CACHE_DIR'] = os.path.join(apptools_windows_tests, "tools")
shutil.copytree(os.path.join(apptools, "apptools-windows-tests"), apptools_windows_tests)
os.chdir(os.path.join(apptools_windows_tests, "tools"))
windows_crosswalk_version = crosswalk_version("canary", BUILD_PARAMETERS.platform)
try:
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/canary/" + windows_crosswalk_version + "/crosswalk-" + windows_crosswalk_version + ".zip").read()
with open("crosswalk-" + windows_crosswalk_version + ".zip", 'wb') as f:
f.write(data)
except Exception as e:
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/canary/" + windows_crosswalk_version + "/crosswalk64-" + windows_crosswalk_version + ".zip").read()
with open("crosswalk64-" + windows_crosswalk_version + ".zip", 'wb') as f:
f.write(data)
os.chdir(os.path.join(os.path.join(apptools_windows_tests, "apptools"), "CI"))
retval = os.system("python -m unittest discover --pattern=*.py > null")
elif BUILD_PARAMETERS.platform == "ios":
shutil.copytree(os.path.join(apptools, "apptools-ios-tests"), apptools_ios_tests)
os.chdir(os.path.join(os.path.join(apptools_ios_tests, "apptools"), "CI"))
retval = os.system("python -m unittest discover --pattern=*.py > null")
return retval
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
4832872 | """
This file implements a general simulation interface. The SimulationWindow has a space
for the display of a specific task, a display area showing the counts of steps and
episodes so far, and buttons for controlling the simulation. It also adds two menus -
a File menu and a Simulation menu.
It is easy to make a GUI simulation interface for your RL task by specializing
the SimulationWindow class with methods appropriate for your application.
Make a specialized simulation window class with:
class MyApplication(SimulationWindow):
def __init__(self, wwidth, wheight): # wwidth and wheight specify the size of the data display area you want.
SimulationWindow.__init__(self, wwidth, wheight)
... your own stuff here ...
... including setting various members and adding extra buttons and menus
Make gViews with the SimulationWindow as the parent for the data areas in your simulation interface.
It uses an RLinterface to do the actual steps and episodes. In your initialization, make
sure that you set the data member rlsim to an RLinterface.
self.rlsim = RLinterface(agentFunction, EnvironmentFunction)
The following methods can and in some cases should be defined by the user:
updateSimDisplay(self) - update the data display area of the simulation - required!
wholeSimDisplay(self) - redraw the data display area of the simulation - required!
printInfo(self) - what to do if the user chooses Print under the File menu
resetSimulation(self) - resets the agent and environment
For reading and writing, define methods:
readFile(self, filename) - reads filename
(called from openFile, invoked by Open on File menu)
writeFile(self, filename) - writes current simulation info into filename
(called from saveFile and saveFileAs, invoked by Save and SaveAs on File menu)
also set self.readtitle as the title of the open file dialog
self.writetitle as the title of the save file dialog
self.initialdir as the directory to start reading or writing at
or for more control, specify the menu handling methods themselves:
openFile(self) - what to do if the user chooses Open under the File menu
saveFile(self) - what to do if the user chooses Save under the File menu
saveFileAs(self) - what to do if the user chooses Save As under the File menu
The preset buttons are:
Start/Stop - starts or stops a currently running simulation
Step - executes one step of the simulation
Episode - executes one episode of the simulation
The previous 3 buttons use the RLinterface member and call methods step and episode for it
Faster - makes the execution faster by updating the display less
Slower - makes the execution slower by updating the display more or pausing before updates
Menu items:
File menu:
Open - calls method openFile(self)
Save - calls method saveFile(self)
Save As - calls method saveFileAs(self)
Print - calls method printInfo(self)
Quit - quits the simulation and stops event processing
Simulation menu: the button commands are repeated here, and some additional things
Start/Stop simulation
Step simulation
Simulate one episode
Faster
Slower
Redisplay - calls method gDrawView
Redisplay All - calls method wholeView
Reset Simulation - calls method resetSimulation(self)
Some methods which you may wish to call:
gDrawView(self) - force updating of the whole display
wholeView(self) - force redrawing of the whole display
debugon(self) - returns True is debug mode is set to on, False otherwise
Note: your agent and environment functions should update the data members
stepnum, episodenum, and episodestepnum for the count displays to work properly
"""
# Example of usage:
"""
Given functions myAgentFn and myEnvFn that conform to the RLinterface requirements:
def myAgentFn(state, reward=None):
...
return action
def myEnvFn(action=None):
...
... increment stepnum, episodestepnum, episodenum appropriately
return state, reward
class MySimClass (SimulationWindow):
def __init__(self, wwidth, wheight):
SimulationWindow.__init__(wwidth, wheight)
gSetTitle(self, "My Simulation")
self.rlsim = RLinterface(myAgentFn, myEnvFn)
dataview = Gview(self)
...draw desired stuff on the dataview ...
def wholeSimDisplay(self):
...draw dataview however you want...
gMakeVisible(self) # might be necessary to force the changes to show
def updateSimDisplay(self):
... update dataview with current data ...
gMakeVisible(self) # might be necessary to force the changes to show
def resetSimulation(self):
... reset agent info ...
... reset environment info, including stepnum, episodestepnum and episodenum
self.wholeView() # force drawing of entire simulation display
mysim = MySimClass(1000, 1000)
gMainloop()
To add additional buttons or menus:
inside __init__
x1, y1, x2, y2 = gdGetViewport(self)
gdSetViewport(self, x1, y1+20, x2, y2+50) # compensate for title bar, add enough for buttons
gdAddButton(self, "myNewButton", self.myFunction, 5, y2)
self.addMyMenu() # add another menu
def myFunction(self):
...
self.wholeSimDisplay()
def addMyMenu(self):
gAddMenu(self, "My Menu", ...)
"""
from RLtoolkit.g import *
from RLtoolkit.basicclasses import *
import sys
import time
class SimulationWindow(Gwindow, Simulation):
def __init__(self, wwidth=500, wheight=600):
Simulation.__init__(self)
if sys.platform in ['mac', 'darwin']:
extrah = 30
else:
extrah = 50 # account for menu being added to window in Windows and Linus
Gwindow.__init__(self, windowTitle="Simulation Window",
gdViewportR=(0, 20, wwidth, wheight + extrah))
self.simulationrunning = False
self.updatedisplay = True
self.displaypause = 0
self.redrawinterval = 1
self.countsx = self.countsy = 0 # xcoord and ycoord of time displays
self.lastcount = None
self.dcount = 0 # display counter
self.status = Gview(self)
self.goff = gColorOff(self)
gdSetViewportR(self.status, 0, wheight, wwidth, 30)
self.gobutton = gdAddButton(self.status, "Go ", self.simStopGo, 5, 0,
self.goff)
self.stepbutton = gdAddButton(self.status, "Step", self.singleStep, 65,
0, self.goff)
self.episodebutton = gdAddButton(self.status, "Episode",
self.singleEpisode, 125, 0, self.goff)
if wwidth < 350: # make the window longer and add the buttons there
gdSetViewportR(self.status, 0, wheight, wwidth, 60)
self.fastbutton = gdAddButton(self.status, "Faster ",
self.simFaster, 5, 30, self.goff)
gdAddButton(self.status, "Slower", self.simSlower, 80, 30,
self.goff)
else: # add the buttons horizontally
self.fastbutton = gdAddButton(self.status, "Faster ",
self.simFaster, 210, 0, self.goff)
gdAddButton(self.status, "Slower", self.simSlower, 285, 0,
self.goff)
self.debug = gIntVar()
self.debug.set(0)
self.setupTimeDisplay()
self.addFileMenu()
self.addSimulationMenu()
self.readtitle = "Open File"
self.writetitle = "Save File As"
self.initialdir = None
def gDrawView(self):
if self.updatedisplay:
self.updateSimDisplay()
self.simDisplayCounts()
gMakeVisible(self)
def gKeyEventHandler(self, key):
print(("got key", key))
def wholeView(self):
self.dcount = 0
self.wholeSimDisplay()
self.simDisplayCounts()
gMakeVisible(self)
def simstep(self):
if self.simulationrunning:
self.rlsim.stepsQ(
self.redrawinterval) # do a number of steps at once for speed
self.simDisplay()
gCheckEvents(self, self.simstep)
# self.after(1, self.simstep) # using tk method after to force it to check for stop event
def simStopGo(self):
if self.simulationrunning: # already running, stop it
self.simulationrunning = False # setSimulationRunning(self, False)
gButtonEnable(self.stepbutton)
gButtonEnable(self.episodebutton)
gSetTitle(self.gobutton, "Go ")
self.wholeView()
else: # set it running
self.simulationrunning = True # setSimulationRunning(self, True)
gButtonDisable(self.stepbutton)
gButtonDisable(self.episodebutton)
gSetTitle(self.gobutton, "Stop")
gMakeVisible(self)
self.simstep()
def singleStep(self):
self.rlsim.step()
self.wholeView()
def epstep(self):
if self.simulationrunning:
self.rlsim.step() # one step at a time - must check for episode termination
self.simDisplay()
if self.rlsim.action != None: # end of episode
gCheckEvents(self, self.epstep)
# self.after(1, self.epstep) # using tk method after to force it to check for stop event
else:
self.simStopGo() # reset buttons on display
def singleEpisode(self):
if not self.simulationrunning:
gButtonDisable(self.stepbutton)
gButtonDisable(self.episodebutton)
gSetTitle(self.gobutton, "Stop")
self.simulationrunning = True
self.rlsim.action = None # force start of episode
self.epstep()
def simFaster(self):
if self.displaypause == 0:
gSetTitle(self.fastbutton, "Jumpier")
self.redrawinterval = 2 * self.redrawinterval
if self.redrawinterval > 32:
self.updatedisplay = False
elif self.displaypause <= 0.01:
self.displaypause = 0
gSetTitle(self.fastbutton, "Faster ")
self.redrawinterval = 1
else:
self.displaypause = self.displaypause / 2
def simSlower(self):
if self.displaypause > 0:
self.updatedisplay = True
self.displaypause = max(0.01, 2 * self.displaypause)
elif self.redrawinterval == 1:
self.updatedisplay = True
gSetTitle(self.fastbutton, "Faster ")
self.displaypause = 0.01
else:
self.updatedisplay = True
self.redrawinterval = self.redrawinterval // 2
if self.redrawinterval == 1:
gSetTitle(self.fastbutton, "Faster ")
def simDisplay(self):
self.dcount += 1
pause(self.displaypause)
if self.redrawinterval != None and self.dcount % self.redrawinterval == 0:
self.gDrawView()
def gDestroy(self, event):
global GDEVICE
Gwindow.gDestroy(self, event)
if GDEVICE.childwindows == []:
self.quit()
def exit(self):
gQuit()
def setupTimeDisplay(self):
oldx1, oldy1, oldx2, oldy2, oldcorner = gGetCS(self.status)
self.countsy = 10
self.countsx = self.wwidth - 60
def simDisplayCounts(self):
# Note: the specific application must update the stepnum, episodenum
# and episodestepnum !!!
if self.countsx != None:
if self.lastcount != None:
gDelete(self.status, self.lastcount)
countstr = str(self.stepnum) + '|' + str(self.episodenum) + \
'|' + str(self.episodestepnum)
self.lastcount = gdDrawTextCentered(self.status, countstr,
("Chicago", 12, "normal"), \
self.countsx, self.countsy, gOn)
def wholeSimDisplay(self):
"display routine to redraw entire display - should be specified for each application"
pass
def updateSimDisplay(self):
"update routine for display - should be specialized for each application"
pass
def openFile(self):
"open simulation file"
filename = gOpenFileUserPick(None, \
title=self.readtitle, \
initialdir=self.initialdir)
self.readFile(filename)
def readFile(self, filename):
"open file - should be specialized for each application"
print("File not read - there is no readFile method")
pass
def saveFile(self):
"save currently open file"
filename = filenameFromTitle(self.title)
if filename != None and filename != "":
self.writeFile(filename)
else:
self.saveFileAs()
pass
def saveFileAs(self):
"save current simulation as"
filename = gSaveFileUserPick(self, \
title=self.writetitle, \
initialdir=self.initialdir)
if filename != None and filename != '': # not cancelled
self.writeFile(filename)
setWindowTitleFromNamestring(self, filename)
def writeFile(self, filename):
"save current simulation info - should be specialized for each application"
print("File not saved - there is no writeFile method")
pass
def printInfo(self):
"print simulation info - should be specialized for each application"
pass
def resetSimulation(self):
"reset simulation - should be specialized for each application"
pass
def debugon(self):
if self.debug.get() == 1:
return True
else:
return False
def toggleDebug(self):
debugset(self.debugon())
def addSimulationMenu(self):
gAddMenu(self, "Simulation", \
[["Start/Stop simulation", self.simStopGo], \
["Step simulation", self.singleStep], \
["Simulate one episode", self.singleEpisode], \
["Faster ", self.simFaster], \
["Slower", self.simSlower], \
'---', \
["Redisplay", self.gDrawView], \
["Redisplay All", self.wholeView], \
'---', \
["Reset Simulation", self.resetSimulation], \
'---', \
['button', "Debug Mode", self.debug, 1, 0, self.toggleDebug], \
])
def addFileMenu(self):
gAddMenu(self, "File", \
[["Open ...", self.openFile], \
["Save", self.saveFile], \
["Save As ...", self.saveFileAs], \
["Print", self.printInfo], \
["Quit", self.exit]])
def pause(seconds):
time.sleep(seconds)
"""
x = 0
for i in range(int(seconds * 118000000)):
x*= 1
"""
def filenameFromTitle(title):
position = title.find('-')
if position != None and position != -1:
name = title[:position]
path = title[position + 1:]
filename = path + '/' + name
filename = filename.strip()
return filename
def setWindowTitleFromNamestring(window, filename):
if isinstance(window, Gwindow):
position = filename.rfind('/')
if position == None or position == -1:
newtitle = filename
else:
newtitle = filename[position + 1:] + ' - ' + filename[:position]
window.title = newtitle
gSetTitle(window, newtitle)
| StarcoderdataPython |
3280473 | """Unit test package for cellartracker."""
| StarcoderdataPython |
107777 | <reponame>MandelaK/SendITAPI-V2
import psycopg2
from db_config import init_db
from flask_jwt_extended import jwt_required, get_jwt_identity
class Parcels():
"""The methods defined in this class represent methods that users
will use to manipulate parcels in the database"""
def __init__(self):
self.db = init_db()
def create_parcel(self, parcel_name, recipient_name, pickup_location,
destination, weight):
"""This method handles requests for creating parcel delivery orders"""
user_data = get_jwt_identity()
self.parcel_name = parcel_name
self.sender_email = user_data["email"]
self.recipient_name = recipient_name
self.destination = destination
self.pickup_location = pickup_location
self.current_location = pickup_location
self.weight = int(weight)
self.price = int(weight) * 3
self.status = "pending"
parcel_info = {
"parcel_name": parcel_name,
"sender_email": self.sender_email,
"recipient_name": recipient_name,
"pickup_location": pickup_location,
"current_location": self.current_location,
"destination": destination,
"weight": int(weight),
"price": int(self.price),
"status": self.status
}
save_parcel = """
INSERT INTO parcels (parcel_name, sender_email, recipient_name,
pickup_location, current_location, destination, weight, price, status)
VALUES (%(parcel_name)s, %(sender_email)s, %(recipient_name)s,
%(pickup_location)s, %(current_location)s, %(destination)s, %(weight)s,
%(price)s, %(status)s)"""
try:
cursor = self.db.cursor()
print("Successfully created cursor. Saving parcel to database...")
cursor.execute(save_parcel, parcel_info)
except (Exception, psycopg2.Error) as error:
print("Could not save the parcel to database: ", error)
return error
else:
self.db.commit()
print("Successfully saved the order to database")
return 201
def change_destination(self, parcel_id, destination):
"""This method will handle requests to the database to change the destination
of a parcel delivery order."""
user_data = get_jwt_identity()
parcel = self.get_parcel_by_id(parcel_id)
if not parcel:
return 404
elif parcel[2] != user_data["email"]:
return 401
elif parcel[9] != "pending":
return 400
else:
update_destination = """UPDATE parcels SET destination = '{}'
WHERE parcel_id = {}""".format(destination, parcel_id)
try:
cursor = self.db.cursor()
print("Successfully created cursor. Updating destination for parcel number {} ...".format(
parcel_id))
cursor.execute(update_destination)
self.db.commit()
count = cursor.rowcount
print("Destination successfully changed for parcel {}. {} rows affected".format(
parcel_id, count))
return 204
except (Exception, psycopg2.Error) as error:
print("Error. Could not update destination of parcel: ", error)
return error
def get_parcel_by_id(self, parcel_id):
"""We have to validate a parcel exists before we can begin to make
changes on it."""
get_parcel = """SELECT * FROM parcels WHERE parcel_id = {}
""".format(parcel_id)
try:
cursor = self.db.cursor()
print("Successfully created cursor. Getting parcel {} ...".format(parcel_id))
cursor.execute(get_parcel)
parc = cursor.fetchone()
if not parc:
return False
else:
return parc
except (Exception, psycopg2.Error) as error:
print ("Could not get parcel {}... ".format(parcel_id), error)
return error
@jwt_required
def get_all_parcels(self):
"""This will be called if the admin wishes to see all parcels
in the database"""
user_data = get_jwt_identity()
if user_data["is_admin"] is True:
admin = True
admin_get_all = """
SELECT * FROM parcels
ORDER BY parcel_id"""
else:
admin = False
user_get_all = """
SELECT * FROM parcels
WHERE sender_email = '{}'
ORDER BY parcel_id
""".format(user_data["email"])
try:
cursor = self.db.cursor()
print("Successfully created cursor. Getting all parcels ...")
if admin is True:
cursor.execute(admin_get_all)
else:
cursor.execute(user_get_all)
data = cursor.fetchall()
if data == []:
return 404
res = []
            for parcels in data:
parcel_id, parcel_name, sender_email, recipient_name, pickup_location, current_location, destination, weight, price, status = parcels
structured_response = dict(
parcel_id=parcel_id,
parcel_name=parcel_name,
sender_email=sender_email,
recipient_name=recipient_name,
pickup_location=pickup_location,
current_location=current_location,
destination=destination,
weight=int(weight),
price=int(price),
status=status)
res.append(structured_response)
return res
except (Exception, psycopg2.Error) as error:
print("Could not get any parcels from database: ", error)
return error
def change_status(self, parcel_id, status):
"""This method handles requests to change the status of an order"""
parcel = self.get_parcel_by_id(parcel_id)
if not parcel:
return 404
elif parcel[9] == "delivered" or parcel[9] == "cancelled":
return 400
else:
change_status = """
UPDATE parcels SET status = '{}' WHERE parcel_id = {}""".format(status, parcel_id)
try:
cursor = self.db.cursor()
print("Successfully created cursor. Changing the status of parcel number {} ...".format(
parcel_id))
cursor.execute(change_status)
self.db.commit()
count = cursor.rowcount
print("Successfully changed the status parcel {}. {} rows affected.".format(
parcel_id, count))
return 204
except (Exception, psycopg2.Error) as error:
print("Could not change the status of the order: ", error)
return error, 400
def change_location(self, parcel_id, current_location):
"""This method handles requests to change the location of a delivery in transit"""
parcel = self.get_parcel_by_id(parcel_id)
if not parcel:
return 404
elif parcel[9] != "transit":
return 400
else:
update_location = """
UPDATE parcels SET current_location = '{}'
WHERE parcel_id = {}""".format(current_location, parcel_id)
try:
cursor = self.db.cursor()
print("Successfully created cursor. Updating current location for parcel \
number {} ...".format(parcel_id))
cursor.execute(update_location)
self.db.commit()
print("Location successfully changed")
return 204
except (Exception, psycopg2.Error) as error:
print("Could not change destinaion of parcel: ", error)
return error
@jwt_required
def cancel_parcel(self, parcel_id):
"""User can only cancel orders they create so long as they are not yet
'delivered'"""
user_data = get_jwt_identity()
get_parcel = self.get_parcel_by_id(parcel_id)
if get_parcel is False:
return 404
elif user_data["email"] != get_parcel[2]:
return 401
elif get_parcel[9] == "cancelled" or get_parcel[9] == "delivered":
return 400
else:
cancel_query = """UPDATE parcels
SET status = 'cancelled' WHERE parcel_id = {}""".format(parcel_id)
try:
cursor = self.db.cursor()
print("Successfully created cursor. Cancelling parcel number {} ...".format(
parcel_id))
cursor.execute(cancel_query)
self.db.commit()
count = cursor.rowcount
print("Successfully changed the status parcel {}. {} rows affected.".format(
parcel_id, count))
return 204
except (Exception, psycopg2.Error) as error:
print("Could not change the status of the parcel: ", error)
return error
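
# Illustrative usage sketch (not part of the original module). Calling these
# methods needs a reachable PostgreSQL database behind init_db() and, because
# of get_jwt_identity()/@jwt_required, an active Flask-JWT-Extended request
# context. All argument values below are placeholders.
def _example_create_and_cancel(parcel_id_to_cancel):
    parcels = Parcels()
    created = parcels.create_parcel(
        parcel_name="Books",
        recipient_name="Jane Doe",
        pickup_location="Nairobi",
        destination="Mombasa",
        weight=4,
    )
    if created == 201:
        return parcels.cancel_parcel(parcel_id_to_cancel)
    return created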
| StarcoderdataPython |
4803162 | <gh_stars>0
#!C:\Python27\python.exe
import sys, os
from PyQt4 import QtCore, QtGui
from docx import Document
from docx.shared import Cm, Inches, Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH
from PIL import Image
from StringIO import StringIO
import dokumentasi_ui
Ui_Main = dokumentasi_ui.Ui_MainWindow
def main():
app = QtGui.QApplication(sys.argv)
window = Dokumentasi()
window.show()
sys.exit(app.exec_())
class Dokumentasi(QtGui.QMainWindow, Ui_Main):
def __init__(self):
super(Dokumentasi, self).__init__()
self.setupUi(self)
self.prepareUi()
self.btnInput.clicked.connect(self.browseInput)
self.btnOutput.clicked.connect(self.browseOutput)
self.btnExit.clicked.connect(self.close)
self.btnStart.clicked.connect(self.start)
self.btnLog.clicked.connect(self.toggleLog)
def prepareUi(self):
self.setWindowTitle('AutoDoc v1.2 - Karogis')
self.mTop.setValidator(QtGui.QDoubleValidator(0, 10, 2, self))
self.mBottom.setValidator(QtGui.QDoubleValidator(0, 10, 2, self))
self.mLeft.setValidator(QtGui.QDoubleValidator(0, 10, 2, self))
self.mRight.setValidator(QtGui.QDoubleValidator(0, 10, 2, self))
self.logger('====================================')
self.logger('|| ***** www.karogis.com ***** ||')
self.logger('====================================\n\n')
self.logVisible = False
self.toggleLog()
self.statusbar.showMessage('Siap')
def browseInput(self):
directory = QtGui.QFileDialog.getExistingDirectory(self, 'Pilih Folder')
if directory:
self.inputFolder.setText(directory)
self.logger('Folder Sumber: ' + directory)
self.dirs = 0
self.images = 0
self.evaluateDirectory(directory)
self.logger('Jumlah Folder: ' + str(self.dirs))
self.logger('Jumlah Foto: ' + str(self.images))
self.statusbar.showMessage('Jumlah Folder: ' + str(self.dirs) + ', Jumlah Foto: ' + str(self.images))
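    # Recursively walks `directory`, logging every .jpg/.jpeg/.png file and every
    # sub-folder while incrementing self.images and self.dirs; `strip` only
    # controls the dash prefix used to indent the log output.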
def evaluateDirectory(self, directory, strip = '-'):
directory = str(directory)
for file in os.listdir(directory):
if file.lower().endswith('.jpg') or file.lower().endswith('.jpeg') or file.lower().endswith('.png'):
self.logger(strip + ' Foto: ' + file)
self.images += 1
QtGui.QApplication.processEvents()
for file in os.listdir(directory):
if os.path.isdir(os.path.join(directory, file)):
self.dirs += 1
self.logger('=====================================')
self.logger(strip + ' Folder: ' + file)
self.evaluateDirectory(os.path.join(directory, file), strip + ' -')
self.logger('=====================================')
QtGui.QApplication.processEvents()
def browseOutput(self):
file = QtGui.QFileDialog.getSaveFileName(self, 'Simpan Sebagai', '', 'Word Document (*.docx)')
if file:
self.outputFolder.setText(file)
self.logger('File Hasil: ' + file)
def start(self):
if self.inputFolder.text() == '' or self.outputFolder.text() == '':
self.showDialog('error', 'Kesalahan!', 'Folder Sumber & File Hasil harus diisi')
return
elif self.images == 0:
self.showDialog('error', 'Kesalahan!', 'Tidak ditemukan foto')
return
top_margin = float(self.mTop.text()) if self.mTop.text() != '' else 2.0
bottom_margin = float(self.mBottom.text()) if self.mBottom.text() != '' else 1.5
left_margin = float(self.mLeft.text()) if self.mLeft.text() != '' else 3.0
right_margin = float(self.mRight.text()) if self.mRight.text() != '' else 1.5
self.logger('\nMenggunakan pengaturan jarak: Atas = ' + str(top_margin) +
'cm; Bawah = ' + str(bottom_margin) +
'cm; Kiri = ' + str(left_margin) +
'cm; Kanan = ' + str(right_margin) + 'cm')
self.doc = Document()
try:
self.doc.save(str(self.outputFolder.text()))
except Exception as e:
self.logger('KESALAHAN: ' + e.strerror)
self.showDialog('error', 'Kesalahan', e.strerror)
return
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.logger('\n\nMemulai Proses...')
self.logger('Ngopi sek coy...\n\n')
self.statusbar.showMessage('Ngopi sek coy...')
self.btnStart.setText('Udud dulu...')
self.btnStart.setEnabled(False)
self.btnExit.setEnabled(False)
self.progressBar.setMaximum(self.images)
self.progressBar.setValue(0)
self.progressCounter = 0
QtGui.QApplication.processEvents()
for section in self.doc.sections:
section.top_margin = Cm(top_margin)
section.bottom_margin = Cm(bottom_margin)
section.left_margin = Cm(left_margin)
section.right_margin = Cm(right_margin)
section.page_width = Inches(8.267)
section.page_height = Inches(11.692)
directory = str(self.inputFolder.text())
self.insertPictures(directory)
QtGui.QApplication.processEvents()
QtGui.QApplication.restoreOverrideCursor()
try:
self.doc.save(str(self.outputFolder.text()))
except Exception as e:
self.logger('KESALAHAN: ' + e.strerror)
self.statusbar.showMessage('Siap')
self.btnStart.setText('Mulai')
self.btnStart.setEnabled(True)
self.btnExit.setEnabled(True)
self.showDialog('error', 'Kesalahan', e.strerror)
else:
self.logger('Selesai!')
self.statusbar.showMessage('Siap')
self.btnStart.setText('Mulai')
self.btnStart.setEnabled(True)
self.btnExit.setEnabled(True)
self.showDialog('info', 'Selesai coy...', 'Dokumentasi berhasil dibuat.\nBuka file?',
[('Tidak', QtGui.QMessageBox.NoRole), ('Ya', QtGui.QMessageBox.YesRole)],
self.openFile)
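    # Builds the document section for one folder: a centred heading with the
    # folder name, then a two-column table of photos (each resized to a 600 px
    # wide JPEG and placed at 8 cm) with the file name as a caption. Every six
    # photos a page break, a fresh heading and a fresh table are added, and
    # sub-folders are processed recursively on their own pages.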
def insertPictures(self, directory):
counter = 0
titlePrefix = str(self.titlePrefix.text()) + ' ' if self.titlePrefix.text() != '' else ''
if self.countPictures(directory) > 0:
heading = self.doc.add_heading(titlePrefix + os.path.basename(directory).upper())
heading.alignment = WD_ALIGN_PARAGRAPH.CENTER
heading_format = heading.paragraph_format
heading_format.space_before = Pt(0)
heading_format.space_after = Pt(24)
table = self.doc.add_table(rows=1, cols=2)
cells = table.rows[0].cells
for file in os.listdir(directory):
if (file.lower().endswith('.jpg') or file.lower().endswith('.jpeg') or file.lower().endswith('.png')) :
if counter % 6 == 0 and counter != 0:
self.doc.add_page_break()
h = self.doc.add_heading(titlePrefix + os.path.basename(directory).upper())
h.alignment = WD_ALIGN_PARAGRAPH.CENTER
hf = h.paragraph_format
hf.space_before = Pt(0)
hf.space_after = Pt(24)
table = self.doc.add_table(rows=1, cols=2)
cells = table.rows[0].cells
elif counter % 2 == 0 and counter != 0:
cells = table.add_row().cells
self.logger('Memproses foto ' + file)
self.progressCounter += 1
self.progressBar.setValue(self.progressCounter)
QtGui.QApplication.processEvents()
img = Image.open(os.path.join(directory, file))
output = StringIO()
width = 600
width_percent = float(width) / float(img.size[0])
height = int(img.size[1] * width_percent)
image = img.resize((width, height), Image.BICUBIC)
image.save(output, 'JPEG', quality=80)
run = cells[counter % 2].paragraphs[0].add_run()
run.add_picture(output, width=Cm(8))
cells[counter % 2].add_paragraph(os.path.splitext(file)[0]).alignment = WD_ALIGN_PARAGRAPH.CENTER
counter += 1
QtGui.QApplication.processEvents()
for file in os.listdir(directory):
if os.path.isdir(os.path.join(directory, file)):
self.logger('=====================================')
self.logger('Folder ditemukan ' + file)
QtGui.QApplication.processEvents()
if len(self.doc.paragraphs) > 0:
self.doc.add_page_break()
self.insertPictures(os.path.join(directory, file))
QtGui.QApplication.processEvents()
def countPictures(self, directory):
counter = 0
for file in os.listdir(directory):
if (file.lower().endswith('.jpg') or file.lower().endswith('.jpeg') or file.lower().endswith('.png')) :
counter += 1
QtGui.QApplication.processEvents()
return counter
def openFile(self, reply):
if reply == 1:
try:
os.startfile(str(self.outputFolder.text()))
QtGui.QApplication.processEvents()
except Exception as e:
self.showDialog('error', 'Kesalahan', e.strerror)
return
return
def toggleLog(self):
if self.logVisible:
self.label_3.hide()
self.logs.hide()
self.btnLog.setText('Tampilkan Log')
self.logVisible = False
else:
self.label_3.show()
self.logs.show()
self.btnLog.setText('Sembunyikan Log')
self.logVisible = True
def logger(self, string):
self.logs.insertPlainText(string + "\n")
self.logs.ensureCursorVisible()
def showDialog(self, type, title, text, buttons = None, callback = None):
dialog = QtGui.QMessageBox()
if type == 'info':
icon = QtGui.QMessageBox.Information
elif type == 'warning':
icon = QtGui.QMessageBox.Warning
elif type == 'error':
icon = QtGui.QMessageBox.Critical
elif type == 'question':
icon = QtGui.QMessageBox.Question
else:
icon = QtGui.QMessageBox.Information
ico = QtGui.QIcon()
ico.addPixmap(QtGui.QPixmap(":/icon/folder.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
dialog.setIcon(icon)
dialog.setWindowTitle(title)
dialog.setWindowIcon(ico)
dialog.setText(text)
if buttons == None:
dialog.setStandardButtons(QtGui.QMessageBox.Ok)
else:
for button, role in buttons:
dialog.addButton(button, role)
reply = dialog.exec_()
if callable(callback):
callback(reply)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Keluar',
"Sudah ngopi bro?", 'Belum', 'Sudah')
if reply == 1:
event.accept()
else:
event.ignore()
self.showDialog('info', 'Ngopi', 'Ngopi dulu bro...')
if __name__ == '__main__':
main()
| StarcoderdataPython |
1687833 | import pytest
from app.api.services.game_services.service import create_new_game
from app.models.game import Game
from app.schemas.game_schema import Team
@pytest.fixture
def game_creator():
def _game_creator(size, game_id="x"):
players = list(map(str, range(size)))
return create_new_game(game_id, players, players[0])
return _game_creator
def test_create_game_correct_teams(game_creator):
for i in range(1, 10):
game: Game = game_creator(size=i)
has_dutch = i % 2 != 0
britain_players_count = 0
france_players_count = 0
dutch_count = 0
for info in game.players_info.values():
if info.team == Team.BRITAIN:
britain_players_count += 1
elif info.team == Team.FRANCE:
france_players_count += 1
else:
assert has_dutch is True
dutch_count += 1
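        # Assumed completion of the check (not in the original source): every
        # player is assigned a team, and odd-sized games get exactly one
        # Dutch player.
        assert britain_players_count + france_players_count + dutch_count == i
        assert dutch_count == (1 if has_dutch else 0)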
| StarcoderdataPython |
36813 | <filename>examples/02-hello_kml.py
import os
import simplekml
from polycircles.polycircles import Polycircle
polycircle = Polycircle(latitude=31.611878, longitude=34.505351, radius=100)
kml = simplekml.Kml()
pol = kml.newpolygon(name="Polycircle", outerboundaryis=polycircle.to_kml())
kml.save('02.kml') | StarcoderdataPython |
185803 | from zschema.leaves import *
from zschema.compounds import *
import zschema.registry
from ztag.annotation import Annotation
import zcrypto_schemas.zcrypto as zcrypto
import zgrab2_schemas.zgrab2 as zgrab2
import zgrab2_schemas.zgrab2.mssql as zgrab2_mssql
import zgrab2_schemas.zgrab2.oracle as zgrab2_oracle
import zgrab2_schemas.zgrab2.ssh as zgrab2_ssh
__local_metadata = {}
for key in Annotation.LOCAL_METADATA_KEYS:
__local_metadata[key] = WhitespaceAnalyzedString()
local_metadata = SubRecord(__local_metadata)
ztag_dh_export = SubRecord({
"dh_params": zcrypto.DHParams(doc="The parameters for the key."),
"support": Boolean(),
"metadata": local_metadata,
"timestamp": Timestamp(),
})
ztag_dh = SubRecord({
"dh_params": zcrypto.DHParams(doc="The parameters for the key."),
"support": Boolean(),
"metadata": local_metadata,
"timestamp": Timestamp(),
})
ztag_rsa_export = SubRecord({
"rsa_params":zcrypto.RSAPublicKey(),
"support":Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_ecdh = SubRecord({
"ecdh_params":zcrypto.ECDHParams(),
"support":Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
zgrab_certificate_trust = SubRecord({
"type":Enum(doc="root, intermediate, or leaf certificate"),
"trusted_path":Boolean(doc="Does certificate chain up to browser root store"),
"valid":Boolean(doc="is this certificate currently valid in this browser"),
"was_valid":Boolean(doc="was this certificate ever valid in this browser")
})
_zcrypto_parsed_cert = zcrypto.ParsedCertificate()
zgrab_certificate = SubRecord({
"parsed": SubRecord({
"__expanded_names": ListOf(String()),
}, extends=_zcrypto_parsed_cert),
"validation":SubRecord({
"nss":zgrab_certificate_trust.new(category="NSS (Firefox) Validation"),
"apple":zgrab_certificate_trust.new(category="Apple Validation"),
"microsoft":zgrab_certificate_trust.new(category="Microsoft Validation"),
"android":zgrab_certificate_trust,
"java":zgrab_certificate_trust,
}),
})
zgrab_server_certificate_valid = SubRecord({
"complete_chain":Boolean(doc="does server provide a chain up to a root"),
"valid":Boolean(doc="is this certificate currently valid in this browser"),
"error":WhitespaceAnalyzedString()
})
ztag_tls_type = SubRecordType({
# This is server_hello.version.name
"version": zcrypto.TLSVersionName(),
# cipher_suite = { id: server_hello.cipher_suite.hex, name: server_hello.cipher_suite.name }
"cipher_suite": SubRecord({
"id": String(doc="The hexadecimal string representation of the numeric cipher algorithm identifier."),
"name": WhitespaceAnalyzedString(
doc="The algorithm identifier for the cipher algorithm identifier, see e.g. https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml.",
examples=["unknown", "TLS_RSA_WITH_RC4_128_MD5", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA", "TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256"],
),
}),
# server_hello.ocsp_stapling
"ocsp_stapling": Boolean(),
# server_hello.secure_renegotiation
"secure_renegotiation": Boolean(),
# certificate.parsed = server_certificates.certificate.parsed
"certificate": zgrab_certificate,
# chain.parsed = [ elt.parsed for elt in server_certificates.chain ]
"chain": ListOf(zgrab_certificate),
# server_hello.scts
"scts": ListOf(zcrypto.SCTRecord()),
# session_ticket = { key: session_ticket[key] for key in ("length, "lifetime_hint") }
"session_ticket": zcrypto.SessionTicket(),
# validation = { server_certificates.validation[key] for key in ("browser_trusted", "browser_error", "matches_domain") }
"validation": zcrypto.TLSCertificateValidation(),
# server_key_exchange = { server_key_exchange[key] for key in ("ecdh_params", "dh_params", "rsa_params")
"server_key_exchange": zcrypto.ServerKeyExchange(),
# signature = ...
"signature": SubRecord({
# ... = signature.valid
"valid": Boolean(),
# ... = signature.signature_error
"signature_error": WhitespaceAnalyzedString(),
# ... = signature.signature_and_hash_type.signature_algorithm
"signature_algorithm": String(),
# ... = signature.signature_and_hash_type.hash_algorithm
"hash_algorithm": String(),
}),
"metadata": local_metadata,
"timestamp": Timestamp(),
})
ztag_tls = ztag_tls_type()
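# Illustrative sketch (an assumption, not ztag source code): the inline comments
# in ztag_tls_type above describe how its fields are lifted out of a zgrab TLS
# handshake log. For a couple of the fields, a transform along those lines might
# look like the following; the input shape is the zgrab handshake record.
def _tls_server_hello_to_ztag(handshake):
    server_hello = handshake["server_hello"]
    return {
        "version": server_hello["version"]["name"],
        "cipher_suite": {
            "id": server_hello["cipher_suite"]["hex"],
            "name": server_hello["cipher_suite"]["name"],
        },
        "ocsp_stapling": server_hello.get("ocsp_stapling"),
        "secure_renegotiation": server_hello.get("secure_renegotiation"),
    }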
ztag_heartbleed = SubRecord({
"heartbeat_enabled":Boolean(),
"heartbleed_vulnerable":Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_smtp_starttls = SubRecord({
"banner": WhitespaceAnalyzedString(),
"ehlo": WhitespaceAnalyzedString(),
"starttls": WhitespaceAnalyzedString(),
"tls": ztag_tls,
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_mail_starttls = SubRecord({
"banner": WhitespaceAnalyzedString(),
"starttls": WhitespaceAnalyzedString(),
"tls": ztag_tls,
"metadata": local_metadata,
"timestamp":Timestamp(),
})
ztag_mail_tls = SubRecord({
"tls":ztag_tls,
"banner": WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
zgrab_unknown_http_header = SubRecord({
"key":String(),
"value":WhitespaceAnalyzedString()
})
zgrab_http_headers = SubRecord({
"access_control_allow_origin":WhitespaceAnalyzedString(),
"accept_patch":WhitespaceAnalyzedString(),
"accept_ranges":WhitespaceAnalyzedString(),
"age":WhitespaceAnalyzedString(),
"allow":WhitespaceAnalyzedString(),
"alt_svc":WhitespaceAnalyzedString(),
"alternate_protocol":WhitespaceAnalyzedString(),
"cache_control":WhitespaceAnalyzedString(),
"connection":WhitespaceAnalyzedString(),
"content_disposition":WhitespaceAnalyzedString(),
"content_encoding":WhitespaceAnalyzedString(),
"content_language":WhitespaceAnalyzedString(),
"content_length":WhitespaceAnalyzedString(),
"content_location":WhitespaceAnalyzedString(),
"content_md5":WhitespaceAnalyzedString(),
"content_range":WhitespaceAnalyzedString(),
"content_type":WhitespaceAnalyzedString(),
"date":WhitespaceAnalyzedString(),
"etag":WhitespaceAnalyzedString(),
"expires":WhitespaceAnalyzedString(),
"last_modified":WhitespaceAnalyzedString(),
"link":WhitespaceAnalyzedString(),
"location":WhitespaceAnalyzedString(),
"p3p":WhitespaceAnalyzedString(),
"pragma":WhitespaceAnalyzedString(),
"proxy_authenticate":WhitespaceAnalyzedString(),
"public_key_pins":WhitespaceAnalyzedString(),
"refresh":WhitespaceAnalyzedString(),
"referer":WhitespaceAnalyzedString(),
"retry_after":WhitespaceAnalyzedString(),
"server":WhitespaceAnalyzedString(),
"set_cookie":WhitespaceAnalyzedString(),
"status":WhitespaceAnalyzedString(),
"strict_transport_security":WhitespaceAnalyzedString(),
"trailer":WhitespaceAnalyzedString(),
"transfer_encoding":WhitespaceAnalyzedString(),
"upgrade":WhitespaceAnalyzedString(),
"vary":WhitespaceAnalyzedString(),
"via":WhitespaceAnalyzedString(),
"warning":WhitespaceAnalyzedString(),
"www_authenticate":WhitespaceAnalyzedString(),
"x_frame_options":WhitespaceAnalyzedString(),
"x_xss_protection":WhitespaceAnalyzedString(),
"content_security_policy":WhitespaceAnalyzedString(),
"x_content_security_policy":WhitespaceAnalyzedString(),
"x_webkit_csp":WhitespaceAnalyzedString(),
"x_content_type_options":WhitespaceAnalyzedString(),
"x_powered_by":WhitespaceAnalyzedString(),
"x_ua_compatible":WhitespaceAnalyzedString(),
"x_content_duration":WhitespaceAnalyzedString(),
"x_forwarded_for":WhitespaceAnalyzedString(),
"x_real_ip":WhitespaceAnalyzedString(),
"proxy_agent":WhitespaceAnalyzedString(),
"unknown":ListOf(zgrab_unknown_http_header)
})
ztag_http = SubRecord({
"status_code":Unsigned16BitInteger(),
"status_line":WhitespaceAnalyzedString(),
"body":HTML(),
"headers":zgrab_http_headers,
"body_sha256":HexString(validation_policy="warn"),
"title":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
golang_crypto_param = SubRecord({
"value":IndexedBinary(),
"length":Unsigned32BitInteger()
})
#ztag_open_proxy = SubRecord({
# "connect":SubRecord({
# "status_code":Integer(),
# "status_line":WhitespaceAnalyzedString(),
# "body":WhitespaceAnalyzedString(),
# "headers":zgrab_http_headers
# }),
# "get":SubRecord({
# "status_code":Integer(),
# "status_line":WhitespaceAnalyzedString(),
# "body":WhitespaceAnalyzedString(),
# "headers":zgrab_http_headers,
# "random_present":Boolean(),
# "body_sha256":HexString()
# }),
# "metadata":local_metadata
#})
# 2018/09/07: Workaround for mis-typed CertType.id field in ES; actual type is uint32, current ES
# type is keyword (string).
ssh_certkey_public_key_type = zgrab2_ssh.CertType(exclude={"bigquery"})
ssh_certkey_public_key_type["id"].set("exclude",
ssh_certkey_public_key_type["id"].exclude |
{"elasticsearch"})
ztag_ssh_v2 = SubRecord({
"metadata": local_metadata,
"timestamp": Timestamp(),
"banner": zgrab2_ssh.AnalyzedEndpointID(),
# This is a massaged version of zgrab2_ssh.KexInitMessage
"support": SubRecord({
"kex_algorithms": zgrab2_ssh.KexAlgorithms(),
"host_key_algorithms": zgrab2_ssh.KeyAlgorithms(),
"first_kex_follows": Boolean(),
"client_to_server": SubRecord({
"ciphers": zgrab2_ssh.CipherAlgorithms(),
"macs": zgrab2_ssh.MACAlgorithms(),
"compressions": zgrab2_ssh.CompressionAlgorithms(),
"languages": zgrab2_ssh.LanguageTags(),
}),
"server_to_client":SubRecord({
"ciphers": zgrab2_ssh.CipherAlgorithms(),
"macs": zgrab2_ssh.MACAlgorithms(),
"compressions": zgrab2_ssh.CompressionAlgorithms(),
"languages": zgrab2_ssh.LanguageTags(),
}),
}),
# This is a massaged version of zgrab2_ssh.AlgorithmSelection
"selected": SubRecord({
"kex_algorithm": zgrab2_ssh.KexAlgorithm(),
"host_key_algorithm": zgrab2_ssh.KeyAlgorithm(),
"client_to_server": zgrab2_ssh.DirectionAlgorithms(),
"server_to_client": zgrab2_ssh.DirectionAlgorithms(),
}),
"key_exchange": zgrab2_ssh.KeyExchange(),
# This is a massaged version of zgrab2_ssh.SSHPublicKeyCert
"server_host_key": SubRecord({
"key_algorithm": zgrab2_ssh.KeyAlgorithm(),
"fingerprint_sha256": HexString(),
"rsa_public_key": zcrypto.RSAPublicKey(),
"dsa_public_key": zcrypto.DSAPublicKey(),
"ecdsa_public_key": zcrypto.ECDSAPublicKey(),
"ed25519_public_key": zgrab2_ssh.ED25519PublicKey(),
"certkey_public_key": SubRecord({
# "nonce" is an IndexedBinary here, not a Binary()
"nonce": IndexedBinary(),
# This is an SSHPublicKey ("algorithm", not "key_algorithm")
"key": zgrab2_ssh.SSHPublicKey(),
"serial": String(),
# "cert_type" is renamed to "type"
"type": ssh_certkey_public_key_type,
"key_id": String(),
"valid_principals": ListOf(String()),
"validity": SubRecord({
# These are DateTimes in SSHPublicKeyCert
"valid_after": Timestamp(doc="Timestamp of when certificate is first valid. Timezone is UTC."),
"valid_before": Timestamp(doc="Timestamp of when certificate expires. Timezone is UTC."),
"length": Signed64BitInteger(),
}),
"reserved": Binary(),
"signature_key": SubRecord({
"key_algorithm": zgrab2_ssh.KeyAlgorithm(),
"fingerprint_sha256": HexString(),
"rsa_public_key": zcrypto.RSAPublicKey(),
"dsa_public_key": zcrypto.DSAPublicKey(),
"ecdsa_public_key": zcrypto.ECDSAPublicKey(),
"ed25519_public_key": zgrab2_ssh.ED25519PublicKey(),
}),
"signature": SubRecord({
"signature_algorithm": SubRecord({
"name": zgrab2_ssh.KeyAlgorithm(),
}),
"value": Binary(),
}),
"parse_error": String(),
# Flattens known/unknown
"extensions":SubRecord({
"permit_X11_forwarding": Boolean(),
"permit_agent_forwarding": Boolean(),
"permit_port_forwarding": Boolean(),
"permit_pty": Boolean(),
"permit_user_rc": Boolean(),
"unknown": ListOf(String()),
}),
# Flattens known/unknown
"critical_options": SubRecord({
"force_command": Boolean(),
"source_address": Boolean(),
"unknown": ListOf(String()),
}),
}),
}),
})
ztag_ftp = SubRecord({
"banner":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
telnet_caps_list = ListOf(SubRecord({
"name":String(),
"value":Unsigned32BitInteger()
}))
ztag_telnet = SubRecord({
"support":Boolean(),
"banner":WhitespaceAnalyzedString(),
"will":telnet_caps_list,
"wont":telnet_caps_list,
"do":telnet_caps_list,
"dont":telnet_caps_list,
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_modbus = SubRecord({
"support":Boolean(),
"function_code":Unsigned16BitInteger(),
"mei_response":SubRecord({
"conformity_level":Signed32BitInteger(),
"objects":SubRecord({
"vendor":WhitespaceAnalyzedString(),
"product_code":WhitespaceAnalyzedString(),
"revision":WhitespaceAnalyzedString(),
"vendor_url":URL(),
"product_name":WhitespaceAnalyzedString(),
"model_name":WhitespaceAnalyzedString(),
"user_application_name":WhitespaceAnalyzedString(),
})
}),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_bacnet = SubRecord({
"support":Boolean(),
"instance_number": Signed32BitInteger(),
"vendor": SubRecord({
"id": Signed32BitInteger(),
"reported_name":WhitespaceAnalyzedString(),
"official_name":WhitespaceAnalyzedString(),
}),
"firmware_revision": String(),
"application_software_revision":String(),
"object_name":WhitespaceAnalyzedString(),
"model_name":WhitespaceAnalyzedString(),
"description":WhitespaceAnalyzedString(),
"location":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_dns_question = SubRecord({
"name":String(),
"type":String()
})
ztag_dns_answer = SubRecord({
"name":String(),
"response":WhitespaceAnalyzedString(),
"type":String()
})
ztag_dns_lookup = SubRecord({
"support":Boolean(),
"errors":Boolean(),
"open_resolver":Boolean(),
"resolves_correctly":Boolean(),
"answers":ListOf(ztag_dns_answer),
"authorities":ListOf(ztag_dns_answer),
"additionals":ListOf(ztag_dns_answer),
"questions":ListOf(ztag_dns_question),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_tls_support = SubRecord({
"support": Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_fox = SubRecord({
"support":Boolean(),
"version":WhitespaceAnalyzedString(),
"id":Signed32BitInteger(),
"hostname":WhitespaceAnalyzedString(),
"host_address":WhitespaceAnalyzedString(),
"app_name":WhitespaceAnalyzedString(),
"app_version":WhitespaceAnalyzedString(),
"vm_name":WhitespaceAnalyzedString(),
"vm_version":WhitespaceAnalyzedString(),
"os_name":WhitespaceAnalyzedString(),
"os_version":WhitespaceAnalyzedString(),
"station_name":WhitespaceAnalyzedString(),
"language":WhitespaceAnalyzedString(),
"time_zone":WhitespaceAnalyzedString(),
"host_id":WhitespaceAnalyzedString(),
"vm_uuid":WhitespaceAnalyzedString(),
"brand_id":WhitespaceAnalyzedString(),
"sys_info":WhitespaceAnalyzedString(),
"auth_agent_type":String(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_dnp3 = SubRecord({
"support":Boolean(),
"raw_response":Binary(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_s7 = SubRecord({
"support":Boolean(),
"system":WhitespaceAnalyzedString(),
"module":WhitespaceAnalyzedString(),
"plant_id":WhitespaceAnalyzedString(),
"copyright":WhitespaceAnalyzedString(),
"serial_number":WhitespaceAnalyzedString(),
"reserved_for_os":WhitespaceAnalyzedString(),
"module_type":WhitespaceAnalyzedString(),
"memory_serial_number":WhitespaceAnalyzedString(),
"cpu_profile":WhitespaceAnalyzedString(),
"oem_id":WhitespaceAnalyzedString(),
"location":WhitespaceAnalyzedString(),
"module_id":WhitespaceAnalyzedString(),
"hardware":WhitespaceAnalyzedString(),
"firmware":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_smb = SubRecord({
"smbv1_support":Boolean(),
"metadata":local_metadata,
})
ztag_upnp_discovery = SubRecord({
"usn": WhitespaceAnalyzedString(),
"agent": WhitespaceAnalyzedString(),
"st": WhitespaceAnalyzedString(),
"ext": WhitespaceAnalyzedString(),
"location": WhitespaceAnalyzedString(),
"server": WhitespaceAnalyzedString(),
"cache_control": WhitespaceAnalyzedString(),
"x_user_agent": WhitespaceAnalyzedString(),
"metadata": local_metadata,
})
# Add the common zgrab2 fields to the results schema which are added by
# ZGrab2Transform._transform_object().
def ztag_zgrab2_transformed(service, results):
results["supported"] = Boolean(doc="If true, %s was detected on this machine." % service)
results["metadata"] = local_metadata
return results
# The oracle ztag transform is a plain copy of the "handshake" field.
ztag_oracle = ztag_zgrab2_transformed(service="Oracle", results=zgrab2_oracle.oracle_scan_response["result"]["handshake"])
ztag_oracle["tls"] = ztag_tls_type(doc="The TLS handshake with the server (if applicable).")
ztag_mssql = ztag_zgrab2_transformed(service="MSSQL", results=SubRecord({
"version": WhitespaceAnalyzedString(doc="The MSSQL version returned by the server in "
"the PRELOGIN response. Its format is "
"'MAJOR.MINOR.BUILD_NUMBER'."),
"instance_name": WhitespaceAnalyzedString(doc="The value of the INSTANCE field "
"returned by the server in the PRELOGIN "
"response."),
"encrypt_mode": Enum(values=zgrab2_mssql.ENCRYPT_MODES,
doc="The negotiated encryption mode for the session. "
"See https://msdn.microsoft.com/en-us/library/dd357559.aspx "
"for details."),
"tls": ztag_tls_type(doc="The TLS handshake with the server (for "
"non-encrypted connections, this used only "
"for the authentication phase).")
}))
ztag_mysql = ztag_zgrab2_transformed(service="MySQL", results=SubRecord({
"protocol_version": zgrab2.mysql.mysql_scan_response["result"]["protocol_version"],
"server_version": zgrab2.mysql.mysql_scan_response["result"]["server_version"],
"capability_flags": zgrab2.mysql.mysql_capability_flags,
"status_flags": zgrab2.mysql.mysql_server_status_flags,
"error_code": zgrab2.mysql.mysql_scan_response["result"]["error_code"],
"error_id": zgrab2.mysql.mysql_scan_response["result"]["error_id"],
"error_message": zgrab2.mysql.mysql_scan_response["result"]["error_message"],
"tls": ztag_tls_type(doc="If the server allows upgrading the "
"session to use TLS, this is the log of "
"the handshake.")
}))
ztag_mongodb = ztag_zgrab2_transformed(service="MongoDB", results=SubRecord({
"build_info": SubRecord({
"version": WhitespaceAnalyzedString(doc="Version of mongodb server"),
"git_version": WhitespaceAnalyzedString(doc="Git Version of mongodb server"),
"max_wire_version": Signed32BitInteger(),
"build_environment": SubRecord({
"dist_mod": WhitespaceAnalyzedString(),
"dist_arch": WhitespaceAnalyzedString(),
"cc": WhitespaceAnalyzedString(),
"cc_flags": WhitespaceAnalyzedString(),
"cxx": WhitespaceAnalyzedString(),
"cxx_flags": WhitespaceAnalyzedString(),
"link_flags": WhitespaceAnalyzedString(),
"target_arch": WhitespaceAnalyzedString(),
"target_os": WhitespaceAnalyzedString()
})
}, doc="Result of issuing the buildInfo command see https://docs.mongodb.com/manual/reference/command/buildInfo"),
"is_master": SubRecord({
"is_master": Boolean(),
"max_wire_version": Signed32BitInteger(),
"min_wire_version": Signed32BitInteger(),
"max_bson_object_size": Signed32BitInteger(),
"max_write_batch_size": Signed32BitInteger(),
"logical_session_timeout_minutes": Signed32BitInteger(),
"max_message_size_bytes": Signed32BitInteger(),
"read_only": Boolean()
}, doc="Result of issuing the isMaster command see https://docs.mongodb.com/manual/reference/command/isMaster")
}))
ztag_postgres = ztag_zgrab2_transformed(service="PostgreSQL", results=SubRecord({
"supported_versions": WhitespaceAnalyzedString(doc="The error string returned by the "
"server in response to a "
"StartupMessage with "
"ProtocolVersion = 0.0"),
"protocol_error": zgrab2.postgres.postgres_error,
"startup_error": zgrab2.postgres.postgres_error,
"is_ssl": Boolean(doc="If the server supports TLS and the session was "
"updated to use TLS, this is true."),
"authentication_mode": zgrab2.postgres.postgres_auth_mode["mode"],
"backend_key_data": zgrab2.postgres.postgres_key_data,
"tls": ztag_tls_type(doc="If the server allows upgrading the "
"session to use TLS, this is the log of "
"the handshake.")
}))
ztag_ipp = ztag_zgrab2_transformed(service="IPP", results=SubRecord({
"version_major": zgrab2.ipp.ipp_scan_response["result"]["version_major"],
"version_minor": zgrab2.ipp.ipp_scan_response["result"]["version_minor"],
"version_string": zgrab2.ipp.ipp_scan_response["result"]["version_string"],
"cups_version": zgrab2.ipp.ipp_scan_response["result"]["cups_version"],
"attributes": zgrab2.ipp.ipp_scan_response["result"]["attributes"],
"attr_ipp_versions": zgrab2.ipp.ipp_scan_response["result"]["attr_ipp_versions"],
"attr_cups_version": zgrab2.ipp.ipp_scan_response["result"]["attr_cups_version"],
"attr_printer_uris": zgrab2.ipp.ipp_scan_response["result"]["attr_printer_uris"],
"tls": ztag_tls_type(doc="If the server allows upgrading the "
"session to use TLS, this is the log of "
"the handshake."),
}))
ztag_schemas = [
("ztag_https", ztag_tls),
("ztag_heartbleed", ztag_heartbleed),
("ztag_smtp_starttls", ztag_smtp_starttls),
("ztag_imap_starttls", ztag_mail_starttls),
("ztag_pop3_starttls", ztag_mail_starttls),
("ztag_imap_tls", ztag_mail_tls),
("ztag_pop3_tls", ztag_mail_tls),
("ztag_http", ztag_http),
("ztag_ftp", ztag_ftp),
("ztag_dh", ztag_dh),
("ztag_dh_export", ztag_dh_export),
("ztag_rsa_export", ztag_rsa_export),
("ztag_ecdh", ztag_ecdh),
("ztag_sslv3", ztag_tls_support),
("ztag_tls1", ztag_tls_support),
("ztag_tls2", ztag_tls_support),
("ztag_tls3", ztag_tls_support),
("ztag_modbus", ztag_modbus),
("ztag_ssh_v2", ztag_ssh_v2),
("ztag_dns_lookup", ztag_dns_lookup),
("ztag_bacnet", ztag_bacnet),
("ztag_fox", ztag_fox),
("ztag_dnp3", ztag_dnp3),
("ztag_s7", ztag_s7),
("ztag_smb", ztag_smb),
("ztag_upnp_discovery", ztag_upnp_discovery),
("ztag_oracle", ztag_oracle),
("ztag_mssql", ztag_mssql),
("ztag_ipp", ztag_ipp),
("ztag_mongodb", ztag_mongodb),
]
for (name, schema) in ztag_schemas:
x = Record({
"ip_address":IPAddress(required=True),
#"timestamp":Timestamp(required=True),
"tags":ListOf(String()),
"metadata": SubRecord({}, allow_unknown=True),
}, extends=schema)
zschema.registry.register_schema("%s" % name, x)
ztag_lookup_spf = SubRecord({
"raw":WhitespaceAnalyzedString(),
})
ztag_lookup_dmarc = SubRecord({
"raw":WhitespaceAnalyzedString(),
"p":String(),
})
ztag_lookup_axfr = SubRecord({
"servers":ListOf(SubRecord({
"server":String(),
"status":String(),
"name":FQDN(),
"support":Boolean(),
"error":WhitespaceAnalyzedString(),
"records":ListOf(SubRecord({
"algorithm":Unsigned16BitInteger(),
"answer":String(),
"class":String(),
"data":WhitespaceAnalyzedString(),
"digest":WhitespaceAnalyzedString(),
"digest_type":Unsigned16BitInteger(),
"expire":Unsigned32BitInteger(),
"flag":Unsigned16BitInteger(),
"flags":Unsigned16BitInteger(),
"key_tag":Unsigned16BitInteger(),
"mbox":FQDN(),
"min_ttl":Unsigned32BitInteger(),
"name":FQDN(),
"ns":FQDN(),
"preference":Signed16BitInteger(),
"protocol":Unsigned16BitInteger(),
"public_key":String(),
"refresh":Signed32BitInteger(),
"retry":Signed32BitInteger(),
"serial":Unsigned32BitInteger(),
"tag":String(),
"type":String(),
"ttl":Unsigned32BitInteger(),
# FIXME 2018/10/15: Conflict with auto-detected version in Elasticsearch (auto type
# FIXME 2018/10/15: is text, new type is keyword)
"value": String(exclude={"elasticsearch"}),
})),
})),
"truncated":Boolean(),
"support":Boolean()
})
_zdb_location_fields = {
"continent":String(),
"country":WhitespaceAnalyzedString(),
"country_code":String(),
"city":WhitespaceAnalyzedString(),
"postal_code":String(),
"timezone":WhitespaceAnalyzedString(),
"province":WhitespaceAnalyzedString(),
"latitude":Double(),
"longitude":Double(),
"registered_country":WhitespaceAnalyzedString(),
"registered_country_code":String(),
}
zdb_location = SubRecord(_zdb_location_fields, category="Location")
zdb_restricted_location = SubRecord(_zdb_location_fields, exclude=["bigquery",])
zdb_as = SubRecord({
"asn":Unsigned32BitInteger(),
"description":WhitespaceAnalyzedString(),
"path":ListOf(Unsigned32BitInteger()),
"rir":String(),
"routed_prefix":FQDN(),
"name":WhitespaceAnalyzedString(),
"country_code":String(),
"organization":WhitespaceAnalyzedString(),
})
__metadata = {}
for key in Annotation.GLOBAL_METADATA_KEYS:
__metadata[key] = WhitespaceAnalyzedString()
zdb_metadata = SubRecord(__metadata)
CTServerStatus = SubRecord({
"index":Signed64BitInteger(),
"added_to_ct_at":Timestamp(),
"ct_to_censys_at":Timestamp(),
"censys_to_ct_at":Timestamp(),
"sct":IndexedBinary(),
})
CTStatus = SubRecord({
"google_aviator":CTServerStatus,
"google_pilot":CTServerStatus,
"google_rocketeer":CTServerStatus,
"google_submariner":CTServerStatus,
"google_testtube":CTServerStatus,
"google_icarus":CTServerStatus,
"google_skydiver":CTServerStatus,
"google_daedalus":CTServerStatus,
"digicert_ct1":CTServerStatus,
"izenpe_com_ct":CTServerStatus,
"izenpe_eus_ct":CTServerStatus,
"symantec_ws_ct":CTServerStatus,
"symantec_ws_vega":CTServerStatus,
"wosign_ctlog":CTServerStatus,
"wosign_ct":CTServerStatus,
"cnnic_ctserver":CTServerStatus,
"gdca_ct":CTServerStatus,
"startssl_ct":CTServerStatus,
"certly_log":CTServerStatus,
"venafi_api_ctlog":CTServerStatus,
"symantec_ws_deneb":CTServerStatus,
"nordu_ct_plausible":CTServerStatus,
"comodo_dodo":CTServerStatus,
"comodo_mammoth":CTServerStatus,
"gdca_ctlog":CTServerStatus,
"symantec_ws_sirius":CTServerStatus,
"certificatetransparency_cn_ct":CTServerStatus,
"venafi_api_ctlog_gen2":CTServerStatus,
"digicert_ct2":CTServerStatus,
"comodo_sabre":CTServerStatus,
"sheca_ct":CTServerStatus,
"letsencrypt_ct_clicky":CTServerStatus,
"behind_the_sofa":CTServerStatus,
"gdca_log":CTServerStatus,
"gdca_log2":CTServerStatus,
"wotrus_ctlog":CTServerStatus,
"wotrus_ctlog3":CTServerStatus,
"akamai_ct":CTServerStatus,
"google_argon_2017":CTServerStatus,
"google_argon_2018":CTServerStatus,
"google_argon_2019":CTServerStatus,
"google_argon_2020":CTServerStatus,
"google_argon_2021":CTServerStatus,
"google_xenon_2018":CTServerStatus,
"google_xenon_2019":CTServerStatus,
"google_xenon_2020":CTServerStatus,
"google_xenon_2021":CTServerStatus,
"google_xenon_2022":CTServerStatus,
"cloudflare_nimbus_2017":CTServerStatus,
"cloudflare_nimbus_2018":CTServerStatus,
"cloudflare_nimbus_2019":CTServerStatus,
"cloudflare_nimbus_2020":CTServerStatus,
"cloudflare_nimbus_2021":CTServerStatus,
"digicert_nessie_2018":CTServerStatus,
"digicert_nessie_2019":CTServerStatus,
"digicert_nessie_2020":CTServerStatus,
"digicert_nessie_2021":CTServerStatus,
"digicert_nessie_2022":CTServerStatus,
"digicert_yeti_2018":CTServerStatus,
"digicert_yeti_2019":CTServerStatus,
"digicert_yeti_2020":CTServerStatus,
"digicert_yeti_2021":CTServerStatus,
"digicert_yeti_2022":CTServerStatus,
"digicert_golem":CTServerStatus,
"izenpe_com_pilot":CTServerStatus,
"letsencrypt_ct_birch":CTServerStatus,
"letsencrypt_ct_faux":CTServerStatus,
"letsencrypt_ct_oak":CTServerStatus,
"nordu_ct_flimsy":CTServerStatus,
"sheca_ctlog":CTServerStatus,
"wosign_ctlog2":CTServerStatus,
"wosign_ctlog3":CTServerStatus,
"ctlogs_alpha":CTServerStatus,
})
CertificateAudit = SubRecord({
"ccadb":SubRecord({
"current_in_intermediates":Boolean(),
"was_in_intermediates":Boolean(),
"owner_name":WhitespaceAnalyzedString(),
"parent_name":WhitespaceAnalyzedString(),
"certificate_name":WhitespaceAnalyzedString(),
"certificate_policy":WhitespaceAnalyzedString(),
"certification_practice_statement":WhitespaceAnalyzedString(),
"cp_same_as_parent":Boolean(),
"audit_same_as_parent":Boolean(),
"standard_audit":WhitespaceAnalyzedString(),
"br_audit":WhitespaceAnalyzedString(),
"auditor":WhitespaceAnalyzedString(),
"standard_audit_statement_timestamp":Timestamp(),
"management_assertions_by":WhitespaceAnalyzedString(),
"comments":EnglishString(es_include_raw=True),
"ev_policy_oids":WhitespaceAnalyzedString(),
"approval_bug":WhitespaceAnalyzedString(),
"first_nss_release":WhitespaceAnalyzedString(),
"first_firefox_release":WhitespaceAnalyzedString(),
"ev_audit":WhitespaceAnalyzedString(),
"current_in_roots":Boolean(),
"was_in_roots":Boolean(),
"test_website_valid":WhitespaceAnalyzedString(),
"mozilla_applied_constraints":WhitespaceAnalyzedString(),
"company_website":WhitespaceAnalyzedString(),
"geographic_focus":WhitespaceAnalyzedString(),
"standard_audit_type":WhitespaceAnalyzedString(),
}, category="CCADB Audit")
})
ztag_certificate_validation = SubRecord({
"valid":Boolean(doc="((has_trusted_path && !revoked && !blacklisted) || whitelisted) && !expired"),
"was_valid":Boolean(doc="True if the certificate is valid now or was ever valid in the past."),
"trusted_path":Boolean(doc="True if there exists a path from the certificate to the root store."),
"had_trusted_path":Boolean(doc="True if now or at some point in the past there existed a path "
"from the certificate to the root store."),
"blacklisted":Boolean(doc="True if the certificate is explicitly blacklisted by some method than OneCRL/CRLSet. "
"For example, a set of certificates revoked by Cloudflare are blacklisted by SPKI hash in Chrome."),
"whitelisted":Boolean(doc="True if the certificate is explicitly whitelisted, "
"e.g. the set of trusted WoSign certificates Apple uses."),
"type":Enum(["leaf","intermediate","root","unknown"], doc="Indicates if the certificate is a root, intermediate, or leaf."),
"paths":NestedListOf(HexString(), "path", validation_policy="ignore"),
"in_revocation_set":Boolean(doc="True if the certificate is in the revocation set (e.g. OneCRL) associated with this root store."),
"parents":ListOf(HexString()),
})
class LintBool(String):
ES_TYPE = "boolean"
# Lints can have any of the following outputs:
# - RESERVED [should never happen]
# - NA [not applicable]
# - NE [not effective]
# - PASS [test success]
# - INFO [failed for info]
# - WARN [failed for warn]
# - FAIL [failed for error]
# - FATAL [test could not complete because cert is broken]
# - UNKNOWN [should never occur]
# We don't want to store a string for every lint in elasticsearch because
# our index size would explode. Instead we map these to a boolean:
# {
#     (reserved, unknown, ne, na, pass) -> null,
#     (info, warn, fail, fatal) -> true
# }
# For BigQuery, we have more options, so we allow some more information:
# {
# all map to original value
# }
# This is horrible to schema, so define a custom type
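# Illustrative sketch (an assumption, not part of zschema or the ztag pipeline):
# the Elasticsearch half of the mapping described above, applied to one zlint
# result string before it is stored in a LintBool field.
def _lint_result_for_elasticsearch(result):
    if result is None:
        return None
    # Results that carry no signal are stored as null ...
    if result.lower() in ("reserved", "unknown", "ne", "na", "pass"):
        return None
    # ... anything that fired (info/warn/fail/fatal) is stored as True.
    return True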
Lints = SubRecord({
"e_basic_constraints_not_critical":LintBool(),
"e_ca_common_name_missing":LintBool(),
"e_ca_country_name_invalid":LintBool(),
"e_ca_country_name_missing":LintBool(),
"e_ca_crl_sign_not_set":LintBool(),
"e_ca_is_ca":LintBool(),
"e_ca_key_cert_sign_not_set":LintBool(),
"e_ca_key_usage_missing":LintBool(),
"e_ca_key_usage_not_critical":LintBool(),
"e_ca_organization_name_missing":LintBool(),
"e_ca_subject_field_empty":LintBool(),
"e_cab_dv_conflicts_with_locality":LintBool(),
"e_cab_dv_conflicts_with_org":LintBool(),
"e_cab_dv_conflicts_with_postal":LintBool(),
"e_cab_dv_conflicts_with_province":LintBool(),
"e_cab_dv_conflicts_with_street":LintBool(),
"e_cab_iv_requires_personal_name":LintBool(),
"e_cab_ov_requires_org":LintBool(),
"e_cert_contains_unique_identifier":LintBool(),
"e_cert_extensions_version_not_3":LintBool(),
"e_cert_policy_iv_requires_country":LintBool(),
"e_cert_policy_iv_requires_province_or_locality":LintBool(),
"e_cert_policy_ov_requires_country":LintBool(),
"e_cert_policy_ov_requires_province_or_locality":LintBool(),
"e_cert_unique_identifier_version_not_2_or_3":LintBool(),
"e_distribution_point_incomplete":LintBool(),
"e_dnsname_bad_character_in_label":LintBool(),
"e_dnsname_contains_bare_iana_suffix":LintBool(),
"e_dnsname_empty_label":LintBool(),
"e_dnsname_hyphen_in_sld":LintBool(),
"e_dnsname_label_too_long":LintBool(),
"e_dnsname_left_label_wildcard_correct":LintBool(),
"e_dnsname_not_valid_tld":LintBool(),
"e_dnsname_underscore_in_sld":LintBool(),
"e_dnsname_wildcard_only_in_left_label":LintBool(),
"e_dsa_correct_order_in_subgroup":LintBool(),
"e_dsa_improper_modulus_or_divisor_size":LintBool(),
"e_dsa_params_missing":LintBool(),
"e_dsa_shorter_than_2048_bits":LintBool(),
"e_dsa_unique_correct_representation":LintBool(),
"e_ec_improper_curves":LintBool(),
"e_ev_business_category_missing":LintBool(),
"e_ev_country_name_missing":LintBool(),
"e_ev_locality_name_missing":LintBool(),
"e_ev_organization_name_missing":LintBool(),
"e_ev_serial_number_missing":LintBool(),
"e_ev_valid_time_too_long":LintBool(),
"e_ext_aia_marked_critical":LintBool(),
"e_ext_authority_key_identifier_critical":LintBool(),
"e_ext_authority_key_identifier_missing":LintBool(),
"e_ext_authority_key_identifier_no_key_identifier":LintBool(),
"e_ext_cert_policy_disallowed_any_policy_qualifier":LintBool(),
"e_ext_cert_policy_duplicate":LintBool(),
"e_ext_cert_policy_explicit_text_ia5_string":LintBool(),
"e_ext_cert_policy_explicit_text_too_long":LintBool(),
"e_ext_duplicate_extension":LintBool(),
"e_ext_freshest_crl_marked_critical":LintBool(),
"e_ext_ian_dns_not_ia5_string":LintBool(),
"e_ext_ian_empty_name":LintBool(),
"e_ext_ian_no_entries":LintBool(),
"e_ext_ian_rfc822_format_invalid":LintBool(),
"e_ext_ian_space_dns_name":LintBool(),
"e_ext_ian_uri_format_invalid":LintBool(),
"e_ext_ian_uri_host_not_fqdn_or_ip":LintBool(),
"e_ext_ian_uri_not_ia5":LintBool(),
"e_ext_ian_uri_relative":LintBool(),
"e_ext_key_usage_cert_sign_without_ca":LintBool(),
"e_ext_key_usage_without_bits":LintBool(),
"e_ext_name_constraints_not_critical":LintBool(),
"e_ext_name_constraints_not_in_ca":LintBool(),
"e_ext_policy_constraints_empty":LintBool(),
"e_ext_policy_constraints_not_critical":LintBool(),
"e_ext_policy_map_any_policy":LintBool(),
"e_ext_san_contains_reserved_ip":LintBool(),
"e_ext_san_directory_name_present":LintBool(),
"e_ext_san_dns_name_too_long":LintBool(),
"e_ext_san_dns_not_ia5_string":LintBool(),
"e_ext_san_edi_party_name_present":LintBool(),
"e_ext_san_empty_name":LintBool(),
"e_ext_san_missing":LintBool(),
"e_ext_san_no_entries":LintBool(),
"e_ext_san_not_critical_without_subject":LintBool(),
"e_ext_san_other_name_present":LintBool(),
"e_ext_san_registered_id_present":LintBool(),
"e_ext_san_rfc822_format_invalid":LintBool(),
"e_ext_san_rfc822_name_present":LintBool(),
"e_ext_san_space_dns_name":LintBool(),
"e_ext_san_uniform_resource_identifier_present":LintBool(),
"e_ext_san_uri_format_invalid":LintBool(),
"e_ext_san_uri_host_not_fqdn_or_ip":LintBool(),
"e_ext_san_uri_not_ia5":LintBool(),
"e_ext_san_uri_relative":LintBool(),
"e_ext_subject_directory_attr_critical":LintBool(),
"e_ext_subject_key_identifier_critical":LintBool(),
"e_ext_subject_key_identifier_missing_ca":LintBool(),
"e_generalized_time_does_not_include_seconds":LintBool(),
"e_generalized_time_includes_fraction_seconds":LintBool(),
"e_generalized_time_not_in_zulu":LintBool(),
"e_ian_bare_wildcard":LintBool(),
"e_ian_dns_name_includes_null_char":LintBool(),
"e_ian_dns_name_starts_with_period":LintBool(),
"e_ian_wildcard_not_first":LintBool(),
"e_inhibit_any_policy_not_critical":LintBool(),
"e_international_dns_name_not_nfkc":LintBool(),
"e_international_dns_name_not_unicode":LintBool(),
"e_invalid_certificate_version":LintBool(),
"e_issuer_field_empty":LintBool(),
"e_name_constraint_empty":LintBool(),
"e_name_constraint_maximum_not_absent":LintBool(),
"e_name_constraint_minimum_non_zero":LintBool(),
"e_old_root_ca_rsa_mod_less_than_2048_bits":LintBool(),
"e_old_sub_ca_rsa_mod_less_than_1024_bits":LintBool(),
"e_old_sub_cert_rsa_mod_less_than_1024_bits":LintBool(),
"e_path_len_constraint_improperly_included":LintBool(),
"e_path_len_constraint_zero_or_less":LintBool(),
"e_public_key_type_not_allowed":LintBool(),
"e_root_ca_extended_key_usage_present":LintBool(),
"e_root_ca_key_usage_must_be_critical":LintBool(),
"e_root_ca_key_usage_present":LintBool(),
"e_rsa_exp_negative":LintBool(),
"e_rsa_mod_less_than_2048_bits":LintBool(),
"e_rsa_no_public_key":LintBool(),
"e_rsa_public_exponent_not_odd":LintBool(),
"e_rsa_public_exponent_too_small":LintBool(),
"e_san_bare_wildcard":LintBool(),
"e_san_dns_name_includes_null_char":LintBool(),
"e_san_dns_name_starts_with_period":LintBool(),
"e_san_wildcard_not_first":LintBool(),
"e_serial_number_longer_than_20_octets":LintBool(),
"e_serial_number_not_positive":LintBool(),
"e_signature_algorithm_not_supported":LintBool(),
"e_sub_ca_aia_does_not_contain_ocsp_url":LintBool(),
"e_sub_ca_aia_marked_critical":LintBool(),
"e_sub_ca_aia_missing":LintBool(),
"e_sub_ca_certificate_policies_missing":LintBool(),
"e_sub_ca_crl_distribution_points_does_not_contain_url":LintBool(),
"e_sub_ca_crl_distribution_points_marked_critical":LintBool(),
"e_sub_ca_crl_distribution_points_missing":LintBool(),
"e_sub_ca_eku_missing":LintBool(),
"e_sub_ca_eku_name_constraints":LintBool(),
"e_sub_ca_must_not_contain_any_policy":LintBool(),
"e_sub_cert_aia_does_not_contain_ocsp_url":LintBool(),
"e_sub_cert_aia_marked_critical":LintBool(),
"e_sub_cert_aia_missing":LintBool(),
"e_sub_cert_cert_policy_empty":LintBool(),
"e_sub_cert_certificate_policies_missing":LintBool(),
"e_sub_cert_country_name_must_appear":LintBool(),
"e_sub_cert_crl_distribution_points_does_not_contain_url":LintBool(),
"e_sub_cert_crl_distribution_points_marked_critical":LintBool(),
"e_sub_cert_eku_missing":LintBool(),
"e_sub_cert_eku_server_auth_client_auth_missing":LintBool(),
"e_sub_cert_given_name_surname_contains_correct_policy":LintBool(),
"e_sub_cert_key_usage_cert_sign_bit_set":LintBool(),
"e_sub_cert_key_usage_crl_sign_bit_set":LintBool(),
"e_sub_cert_locality_name_must_appear":LintBool(),
"e_sub_cert_locality_name_must_not_appear":LintBool(),
"e_sub_cert_not_is_ca":LintBool(),
"e_sub_cert_or_sub_ca_using_sha1":LintBool(),
"e_sub_cert_postal_code_must_not_appear":LintBool(),
"e_sub_cert_province_must_appear":LintBool(),
"e_sub_cert_province_must_not_appear":LintBool(),
"e_sub_cert_street_address_should_not_exist":LintBool(),
"e_sub_cert_valid_time_too_long":LintBool(),
"e_subject_common_name_max_length":LintBool(),
"e_subject_common_name_not_from_san":LintBool(),
"e_subject_contains_noninformational_value":LintBool(),
"e_subject_contains_reserved_ip":LintBool(),
"e_subject_country_not_iso":LintBool(),
"e_subject_empty_without_san":LintBool(),
"e_subject_info_access_marked_critical":LintBool(),
"e_subject_locality_name_max_length":LintBool(),
"e_subject_not_dn":LintBool(),
"e_subject_organization_name_max_length":LintBool(),
"e_subject_organizational_unit_name_max_length":LintBool(),
"e_subject_state_name_max_length":LintBool(),
"e_utc_time_does_not_include_seconds":LintBool(),
"e_utc_time_not_in_zulu":LintBool(),
"e_validity_time_not_positive":LintBool(),
"e_wrong_time_format_pre2050":LintBool(),
"n_ca_digital_signature_not_set":LintBool(),
"n_contains_redacted_dnsname":LintBool(),
"n_sub_ca_eku_not_technically_constrained":LintBool(),
"n_subject_common_name_included":LintBool(),
"w_distribution_point_missing_ldap_or_uri":LintBool(),
"w_dnsname_underscore_in_trd":LintBool(),
"w_dnsname_wildcard_left_of_public_suffix":LintBool(),
"w_eku_critical_improperly":LintBool(),
"w_ext_aia_access_location_missing":LintBool(),
"w_ext_cert_policy_contains_noticeref":LintBool(),
"w_ext_cert_policy_explicit_text_includes_control":LintBool(),
"w_ext_cert_policy_explicit_text_not_nfc":LintBool(),
"w_ext_cert_policy_explicit_text_not_utf8":LintBool(),
"w_ext_crl_distribution_marked_critical":LintBool(),
"w_ext_ian_critical":LintBool(),
"w_ext_key_usage_not_critical":LintBool(),
"w_ext_policy_map_not_critical":LintBool(),
"w_ext_policy_map_not_in_cert_policy":LintBool(),
"w_ext_san_critical_with_subject_dn":LintBool(),
"w_ext_subject_key_identifier_missing_sub_cert":LintBool(),
"w_ian_iana_pub_suffix_empty":LintBool(),
"w_issuer_dn_leading_whitespace":LintBool(),
"w_issuer_dn_trailing_whitespace":LintBool(),
"w_multiple_issuer_rdn":LintBool(),
"w_multiple_subject_rdn":LintBool(),
"w_name_constraint_on_edi_party_name":LintBool(),
"w_name_constraint_on_registered_id":LintBool(),
"w_name_constraint_on_x400":LintBool(),
"w_root_ca_basic_constraints_path_len_constraint_field_present":LintBool(),
"w_root_ca_contains_cert_policy":LintBool(),
"w_rsa_mod_factors_smaller_than_752":LintBool(),
"w_rsa_mod_not_odd":LintBool(),
"w_rsa_public_exponent_not_in_range":LintBool(),
"w_san_iana_pub_suffix_empty":LintBool(),
"w_serial_number_low_entropy":LintBool(),
"w_sub_ca_aia_does_not_contain_issuing_ca_url":LintBool(),
"w_sub_ca_certificate_policies_marked_critical":LintBool(),
"w_sub_ca_eku_critical":LintBool(),
"w_sub_ca_name_constraints_not_critical":LintBool(),
"w_sub_cert_aia_does_not_contain_issuing_ca_url":LintBool(),
"w_sub_cert_certificate_policies_marked_critical":LintBool(),
"w_sub_cert_eku_extra_values":LintBool(),
"w_sub_cert_sha1_expiration_too_long":LintBool(),
"w_subject_dn_leading_whitespace":LintBool(),
"w_subject_dn_trailing_whitespace":LintBool(),
}, validation_policy="ignore")
ZLint = SubRecord({
# version is an int64 in the protobuf
"version":Unsigned16BitInteger(validation_policy="ignore"),
"notices_present":Boolean(),
"warnings_present":Boolean(),
"errors_present":Boolean(),
"fatals_present":Boolean(),
"lints":Lints,
})
certificate = Record({
"parsed": SubRecord({
"__expanded_names": ListOf(String()),
}, extends=zcrypto.ParsedCertificate()),
"raw":Binary(),
"fingerprint_sha256":HexString(),
"tags":ListOf(WhitespaceAnalyzedString()),
"metadata":SubRecord({
"updated_at":Timestamp(),
"added_at":Timestamp(),
"post_processed":Boolean(),
"post_processed_at":Timestamp(),
"seen_in_scan":Boolean(),
"source":String(),
"parse_version":Unsigned16BitInteger(),
"parse_error":WhitespaceAnalyzedString(),
"parse_status":String(),
}, category="Metadata"),
"parents":ListOf(String(), category="Misc"),
"parent_spki_subject_fingerprint":HexString(),
"validation":SubRecord({
"nss":ztag_certificate_validation.new(category="NSS (Firefox) Validation"),
"apple":ztag_certificate_validation.new(category="Apple Validation"),
"microsoft":ztag_certificate_validation.new(category="Microsoft Validation"),
#"java":ztag_certificate_validation,
#"android":ztag_certificate_validation,
"google_ct_primary":ztag_certificate_validation.new(category="Google CT Validation"),
#"google_ct_submariner":ztag_certificate_validation,
}),
"ct":CTStatus.new(category="Certificate Transparency Logs"),
# TODO: 2018/08/14 -- ccadb data is not being loaded, so hold off on creating this schema.
# "audit":CertificateAudit,
"zlint":ZLint.new(category="ZLint"),
"precert":Boolean(category="Misc")
})
zschema.registry.register_schema("certificate", certificate)
ipv4_host = Record({
Port(443):SubRecord({
"https":SubRecord({
"tls":ztag_tls,
"get":ztag_http,
"heartbleed":ztag_heartbleed,
"dhe": ztag_dh,
"rsa_export": ztag_rsa_export,
"dhe_export": ztag_dh_export,
"ssl_3": ztag_tls_support,
"tls_1_1": ztag_tls_support,
"tls_1_2": ztag_tls_support,
"ecdhe": ztag_ecdh,
}, category="443/HTTPS")
}),
Port(80):SubRecord({
"http":SubRecord({
"get":ztag_http,
}, category="80/HTTP"),
}),
Port(8080):SubRecord({
"http":SubRecord({
"get":ztag_http,
}, category="8080/HTTP"),
}),
Port(8888):SubRecord({
"http":SubRecord({
"get":ztag_http,
}, category="8888/HTTP"),
}),
Port(25):SubRecord({
"smtp":SubRecord({
"starttls": ztag_smtp_starttls,
}, category="25/SMTP"),
}),
Port(23):SubRecord({
"telnet":SubRecord({
"banner":ztag_telnet
}, category="23/Telnet")
}),
Port(2323):SubRecord({
"telnet":SubRecord({
"banner":ztag_telnet
}, category="2323/Telnet")
}),
Port(21):SubRecord({
"ftp":SubRecord({
"banner":ztag_ftp,
}, category="21/FTP")
}),
Port(102):SubRecord({
"s7":SubRecord({
"szl":ztag_s7
}, category="102/S7")
}),
Port(110):SubRecord({
"pop3":SubRecord({
"starttls":ztag_mail_starttls,
}, category="110/POP3")
}),
Port(143):SubRecord({
"imap":SubRecord({
"starttls":ztag_mail_starttls,
}, category="143/IMAP")
}),
Port(445):SubRecord({
"smb":SubRecord({
"banner":ztag_smb
}, category="445/SMB", validation_policy="error")
}),
Port(993):SubRecord({
"imaps":SubRecord({
"tls":ztag_mail_tls,
}, category="993/IMAPS")
}),
Port(995):SubRecord({
"pop3s":SubRecord({
"tls":ztag_mail_tls,
}, category="995/POP3S")
}),
Port(587):SubRecord({
"smtp":SubRecord({
"starttls": ztag_smtp_starttls,
}, category="587/SMTP")
}),
Port(502):SubRecord({
"modbus":SubRecord({
"device_id":ztag_modbus
}, category="502/Modbus")
}),
Port(22):SubRecord({
"ssh":SubRecord({
"v2": ztag_ssh_v2
}, category="22/SSH"),
}),
Port(53):SubRecord({
"dns":SubRecord({
"lookup":ztag_dns_lookup
}, category="53/DNS")
}),
Port(47808):SubRecord({
"bacnet":SubRecord({
"device_id":ztag_bacnet
}, category="47808/BACNET")
}),
Port(1911):SubRecord({
"fox":SubRecord({
"device_id":ztag_fox
}, category="1911/Fox")
}),
Port(20000):SubRecord({
"dnp3":SubRecord({
"status":ztag_dnp3,
}, category="20000/DNP3")
}),
Port(7547):SubRecord({
"cwmp":SubRecord({
"get":ztag_http,
}, category="7547/CWMP")
}),
Port(1900):SubRecord({
"upnp":SubRecord({
"discovery":ztag_upnp_discovery,
}, category="1900/UPnP")
}),
Port(1521):SubRecord({
"oracle":SubRecord({
"banner": ztag_oracle,
}, category="1521/Oracle"),
}),
Port(1433):SubRecord({
"mssql":SubRecord({
"banner": ztag_mssql,
}, category="1433/MSSQL"),
}),
Port(3306): SubRecord({
"mysql": SubRecord({
"banner": ztag_mysql,
}, category="3306/MySQL"),
}),
Port(27017): SubRecord({
"mongodb": SubRecord({
"banner": ztag_mongodb ,
}, category="27017/MongoDB"),
}),
Port(5432): SubRecord({
"postgres": SubRecord({
"banner": ztag_postgres,
}, category="5432/Postgres"),
}),
Port(631): SubRecord({
"ipp": SubRecord({
"banner": ztag_ipp,
}, category="631/IPP"),
}),
"tags":ListOf(WhitespaceAnalyzedString(), category="Basic Information"),
"metadata":zdb_metadata,
"location":zdb_location,
"__restricted_location":zdb_restricted_location,
"autonomous_system":zdb_as.new(category="Basic Information"),
"notes":WhitespaceAnalyzedString(),
"ip":IPv4Address(required=True, category="Basic Information"),
"ipint":Unsigned32BitInteger(required=True, doc="Integer value of IP address in host order"),
"updated_at":Timestamp(),
"zdb_version":Unsigned32BitInteger(),
"protocols":ListOf(String(), category="Basic Information"),
"ports":ListOf(Unsigned16BitInteger())
})
website = Record({
Port(443):SubRecord({
"https":SubRecord({
"get":ztag_http,
"tls":ztag_tls,
"heartbleed":ztag_heartbleed,
"dhe": ztag_dh,
"rsa_export": ztag_rsa_export,
"dhe_export": ztag_dh_export,
"ssl_3": ztag_tls_support,
"tls_1_1": ztag_tls_support,
"tls_1_2": ztag_tls_support,
"ecdhe": ztag_ecdh,
}),
"https_www":SubRecord({
"tls":ztag_tls,
"get":ztag_http,
})
}, category="443/HTTPS"),
Port(80):SubRecord({
"http":SubRecord({
"get":ztag_http,
}),
"http_www":SubRecord({
"get":ztag_http,
}),
}, category="80/HTTP"),
Port(25):SubRecord({
"smtp":SubRecord({
"starttls": ztag_smtp_starttls,
})
}, category="25/SMTP"),
Port(0):SubRecord({
"lookup":SubRecord({
"spf":ztag_lookup_spf,
"dmarc":ztag_lookup_dmarc,
"axfr":ztag_lookup_axfr,
})
}, category="Basic Information"),
"tags":ListOf(WhitespaceAnalyzedString(), category="Basic Information"),
"metadata":zdb_metadata,
"notes":EnglishString(es_include_raw=True),
"domain":String(category="Basic Information"),
"alexa_rank":Unsigned32BitInteger(doc="Rank in the Alexa Top 1 Million. "
"Null if not currently in the Top 1 Million sites.",
category="Basic Information"),
"updated_at":Timestamp(),
"zdb_version":Unsigned32BitInteger(),
"protocols":ListOf(String(), category="Basic Information"),
"ports":ListOf(Unsigned16BitInteger())
})
DROP_KEYS = {'ip_address', 'metadata', 'tags', 'timestamp'}
zschema.registry.register_schema("ipv4host", ipv4_host)
zschema.registry.register_schema("website", website)
import importlib
import os
import sys
def to_list():
root = os.path.dirname((os.path.dirname(__file__)))
temp_dir = os.path.join(root, "templates")
return os.listdir(temp_dir)
def create(temp_name, dst):
    # check whether the templates directory contains a matching template directory
if temp_name not in to_list():
print(f"无该{temp_name}模板项目.")
sys.exit(0)
model = importlib.import_module("builder.cli.project")
class_name = f"{temp_name.capitalize()}Project"
clazz = getattr(model, class_name)
if not clazz:
print(f"No {temp_name} template!")
sys.exit(2)
instance = clazz()
instance.run(dst)
# print(f"创建{temp_name}项目完成, 在\"{instance.output_dir}\".")
print(f"{temp_name} project created successfully! 🎉")
print(f"Location at \"{instance.output_dir}\".")
if __name__ == '__main__':
# create("python", "/Users/xiangzheng/developer/projects/personal/")
create("spring", "/Users/xiangzheng/developer/projects/personal/")
# create("golang", "/Users/xiangzheng/developer/projects/personal/")
# create("python", "/Users/xiangzheng/developer/projects/personal/")
import argparse
import random
import os
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.autograd import Variable
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
parser = argparse.ArgumentParser(description='Prediction APP')
parser.add_argument('image_path', action="store", help="Location of image to predict")
parser.add_argument('checkpoint', action="store", help="Location of last checkpoint for prediction")
parser.add_argument('--top_k', action="store", dest= "top_k", default=5, help="Number of most likely classes")
parser.add_argument('--category_names', action="store", dest="category_names", default='cat_to_name.json', help="Mapping of categories to real names")
parser.add_argument('--gpu', action='store_true', default=False, dest='gpu', help="Set GPU mode")
print(parser.parse_args())
results = parser.parse_args()
def build_model(input_size, hidden_size, output_size, arch):
action = 'models.' + arch + '(pretrained=True)'
model = eval(action)
print('Using pretrained model ' + arch)
print("Building classifier with " + format(hidden_size) + " Hidden Units")
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_size)),
('drop1', nn.Dropout(p=0.1)),
('relu1', nn.ReLU()),
('logits', nn.Linear(hidden_size, output_size)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
return model
def load_checkpoint(filepath):
checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)
model = build_model(checkpoint['input_size'], checkpoint['hidden_layer'], checkpoint['output_size'], checkpoint['arch'])
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
    returns a NumPy array
'''
im = Image.open(image)
im.load()
im = im.resize((256,256))
value = 0.5*(256-224)
im = im.crop((value,value,256-value,256-value))
im = np.array(im)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
im = (im - mean)/std
im = im.transpose((2, 0, 1))
return im
def predict(image_path, model, topk, gpu):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
if gpu == True:
mode = 'cuda'
print('Running in GPU Mode...')
else:
mode = 'cpu'
print('Running in CPU Mode...')
model.to(mode)
model.eval()
image = process_image(image_path)
image = torch.from_numpy(np.array([image])).float()
image = Variable(image).to(mode)
output = model.forward(image)
probabilities = torch.exp(output).data
prob = torch.topk(probabilities, topk)[0].tolist()[0]
index = torch.topk(probabilities, topk)[1].tolist()[0]
ind = []
for i in range(len(model.class_to_idx.items())):
ind.append(list(model.class_to_idx.items())[i][0])
# transfer index to label
label = []
for i in range(topk):
label.append(ind[index[i]])
return prob, label
import json
with open(format(results.category_names), 'r') as f:
cat_to_name = json.load(f)
model_load = load_checkpoint(format(results.checkpoint))
model_load
image = format(results.image_path)
probs, classes = predict(image, model_load, int(results.top_k), results.gpu)
labels = []
for cl in classes:
labels.append(cat_to_name[cl])
print(probs)
print(labels)
#How to run
# python predict.py ./flowers/test/1/image_06752.jpg checkpoint/checkpoint.pth --top_k 3
from django.views.generic import (TemplateView)
class VisualiseTemplateView(TemplateView):
"""
Class-based view to show the visualise template
"""
template_name = 'researchdata/visualise.html'
import ply.lex as lex
import ply.yacc as yacc
import lexer
import sys
import ast
tokens = lexer.tokens
precedence = (
('right', 'ELSE'),
)
def p_start (t):
'''start : program'''
t[0] = t[1]
def p_program_01 (t):
'''program : program_part'''
t[0] = ast.Program(t[1])
def p_program_02 (t):
'''program : program program_part'''
t[1].add(t[2])
t[0] = t[1]
def p_program_part (t):
'''program_part : include_directive
| typedef
| structdef
| using_directive
| function_definition
| declaration_statement
| comment
'''
t[0] = t[1]
def p_typedef_01 (t):
'''typedef : typedef_body SEMI'''
t[0] = t[1]
def p_typedef_body (t):
'''typedef_body : TYPEDEF type IDENTIFIER'''
lexer.typedefs[t[3]] = 'TYPEID'
t[0] = ast.TypeDef(t[2], t[3])
def p_structdef (t):
'''structdef : struct_name LBRA struct_elem_list RBRA SEMI'''
t[3].id = t[1]
t[0] = t[3]
def p_struct_name (t):
'''struct_name : STRUCT IDENTIFIER'''
print "Added typeid " + t[2]
lexer.typedefs[t[2]] = 'TYPEID'
t[0] = t[2]
def p_struct_elem_list_01 (t):
'''struct_elem_list : declaration_statement'''
t[0] = ast.StructDef(t[1])
def p_struct_elem_list_02 (t):
'''struct_elem_list : struct_elem_list declaration_statement'''
t[1].add(t[2])
t[0] = t[1]
def p_struct_elem (t):
'''struct_elem : type identifier_list SEMI'''
for c in t[2].children:
c.type = t[1]
t[0] = t[2]
def p_identifier_list_01 (t):
'''identifier_list : IDENTIFIER'''
t[0] = ast.VariableDeclarationStatement(ast.VariableDeclaration(t[1]))
def p_identifier_list_02 (t):
'''identifier_list : identifier_list COMMA IDENTIFIER'''
t[1].add(ast.VariableDeclaration(t[3]))
t[0] = t[1]
def p_comment_01 (t):
'''comment : LINECOM'''
t[0] = ast.LineComment(t[1])
def p_comment_02 (t):
'''comment : BLOCKCOM'''
t[0] = ast.BlockComment(t[1])
def p_include_directive_01 (t):
'''include_directive : INCLUDE LT IDENTIFIER GT
| INCLUDE LT STRING GT
| INCLUDE LT VECTOR GT'''
t[0] = ast.Include(t[3])
def p_include_directive_02 (t):
'''include_directive : INCLUDE STRING_LIT'''
t[0] = ast.Include(t[2])
def p_using_directive (t):
'''using_directive : USING NAMESPACE IDENTIFIER SEMI'''
t[0] = ast.UsingNamespace(t[3])
def p_function_definition_01 (t):
'''function_definition : type IDENTIFIER LPAR RPAR block'''
t[0] = ast.Function(t[2], t[1], ast.FormalParametersList(), t[5])
def p_function_definition_02 (t):
'''function_definition : type IDENTIFIER LPAR formal_parameters_list RPAR block'''
t[0] = ast.Function(t[2], t[1], t[4], t[6])
def p_empty (t):
'''empty :'''
pass
def p_formal_parameters_list_01 (t):
'''formal_parameters_list : formal_parameter'''
t[0] = ast.FormalParametersList(t[1])
def p_formal_parameters_list_02 (t):
'''formal_parameters_list : formal_parameters_list COMMA formal_parameter'''
t[1].add(t[3])
t[0] = t[1]
def p_formal_parameter_01 (t):
'''formal_parameter : type IDENTIFIER'''
t[0] = ast.FormalParameter(t[2], t[1])
t[0].is_ref = False
def p_formal_parameter_02 (t):
'''formal_parameter : type AND IDENTIFIER'''
t[0] = ast.FormalParameter(t[3], t[1])
t[0].is_ref = True
t[0].type.is_reference = True
def p_statement_list_01 (t):
'''statement_list : statement'''
t[1].isStatement = True
t[0] = ast.CompoundStatement(t[1])
def p_statement_list_02 (t):
'''statement_list : statement_list statement'''
t[2].isStatement = True
t[1].add(t[2])
t[0] = t[1]
def p_statement (t):
'''statement : declaration_statement
| cout_statement
| cin_statement
| while_statement
| for_statement
| if_statement
| assignment_statement
| return_statement
| block
| comment
| empty_statement
'''
# | while_statement_cin
t[0] = t[1]
def p_empty_statement (t):
'''empty_statement : '''
t[0] = ast.NullNode()
def p_block (t):
'''block : LBRA statement_list RBRA'''
t[0] = t[2]
def p_cout_statement_01 (t):
'''cout_statement : COUT cout_elements_list SEMI'''
t[0] = t[2]
def p_cout_statement_02 (t):
'''cout_statement : CERR cout_elements_list SEMI'''
t[0] = t[2]
def p_cout_statement_03 (t):
'''cout_statement : COUT DOT IDENTIFIER LPAR actual_parameters_list RPAR SEMI'''
t[0] = ast.CoutModifier(t[3], t[5])
def p_cout_statement_04 (t):
'''cout_statement : CERR DOT IDENTIFIER LPAR actual_parameters_list RPAR SEMI'''
t[0] = ast.CoutModifier(t[3], t[5])
def p_cout_elements_list_01 (t):
'''cout_elements_list : LPUT cout_element'''
t[0] = ast.CoutStatement(t[2])
def p_cout_elements_list_02 (t):
'''cout_elements_list : cout_elements_list LPUT cout_element'''
t[1].add(t[3])
t[0] = t[1]
def p_cout_element_01 (t):
'''cout_element : ENDL'''
t[0] = ast.CoutBreakLine();
def p_cout_element_02 (t):
'''cout_element : lor_expression'''
t[0] = ast.CoutElement(t[1])
def p_cin_bloc (t):
'''cin_bloc : CIN cin_elements_list'''
t[0] = t[2]
t[0].is_expression = True
def p_cin_statement (t):
'''cin_statement : CIN cin_elements_list SEMI'''
t[0] = t[2]
t[0].is_expression = False
def p_cin_elements_list_01 (t):
'''cin_elements_list : RPUT reference_expression'''
t[0] = ast.CinStatement(t[2])
def p_cin_elements_list_02 (t):
'''cin_elements_list : cin_elements_list RPUT reference_expression'''
t[1].add(t[3])
t[0] = t[1]
def p_literal_01 (t):
'''literal : INTEGER_LIT'''
t[0]=ast.IntLiteral(t[1])
def p_literal_02 (t):
'''literal : REAL_LIT'''
t[0]=ast.FloatLiteral(t[1])
def p_literal_03 (t):
'''literal : TRUE
| FALSE'''
t[0]=ast.BoolLiteral(t[1])
def p_literal_04 (t):
'''literal : STRING_LIT'''
t[0]=ast.StringLiteral(t[1])
def p_literal_05 (t):
'''literal : CHAR_LIT'''
t[0]=ast.CharLiteral(t[1])
def p_factor_01 (t):
'''factor : literal'''
t[0] = t[1]
def p_factor_02 (t):
'''factor : reference_expression'''
t[0] = t[1]
def p_factor_03(t):
'''factor : LPAR assignment_expression RPAR'''
t[0] = ast.Parenthesis(t[2])
def p_factor_04 (t):
'''factor : IDENTIFIER LPAR actual_parameters_list RPAR'''
t[0] = ast.FunctionCall(t[1], t[3])
def p_factor_05 (t):
'''factor : IDENTIFIER COLONCOLON assignment_expression'''
t[0] = t[3]
def p_factor_06 (t):
'''factor : reference_expression DOT IDENTIFIER LPAR actual_parameters_list RPAR'''
t[0] = ast.FunctionCall(t[3], t[5], t[1])
def p_factor_07 (t):
'''factor : type LPAR actual_parameters_list RPAR'''
t[0] = ast.Constructor(t[1], t[3])
def p_factor_08 (t):
'''factor : LPAR type RPAR assignment_expression'''
t[0] = ast.CastExpression(t[2], t[4])
def p_reference_expression_01 (t):
'''reference_expression : IDENTIFIER'''
t[0] = ast.Identifier(t[1])
def p_reference_expression_02 (t):
'''reference_expression : reference_expression LCOR relational_expression RCOR'''
t[0] = ast.Reference(t[1], t[3])
def p_reference_expression_03 (t):
'''reference_expression : reference_expression DOT IDENTIFIER'''
t[0] = ast.StructReference(t[1], t[3])
def p_unary_expression_01(t):
'''unary_expression : unary_operator factor
| PLUSPLUS unary_expression
| MINUSMINUS unary_expression
'''
t[0]=ast.UnaryOp(t[1],t[2])
t[0].pre = True
def p_unary_expression_02(t):
'''unary_expression : unary_expression PLUSPLUS
| unary_expression MINUSMINUS
'''
t[0]=ast.UnaryOp(t[2],t[1])
t[0].pre = False
def p_unary_expression_03(t):
'''unary_expression : factor
'''
t[0]=t[1]
# still missing: handling of ++
def p_cast_expression_01(t):
'''
cast_expression : unary_expression
'''
t[0]=t[1]
def p_cast_expression_02(t):
'''
cast_expression : type LPAR lor_expression RPAR
'''
t[0]=ast.CastExpression(t[1],t[3])
def p_multiplicative_expression_01(t):
'''
multiplicative_expression : unary_expression
'''
t[0]=t[1]
def p_multiplicative_expression_02(t):
'''
multiplicative_expression : multiplicative_expression multiplicative_operator unary_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3]);
def p_additive_expression_01(t):
'''
additive_expression : multiplicative_expression
'''
t[0]=t[1]
def p_additive_expression_02(t):
'''
additive_expression : additive_expression additive_operator multiplicative_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
#def p_shift_expression_01(t):
#'''
#shift_expression : additive_expression
#'''
#t[0]=t[1]
#def p_shift_expression_02(t):
#'''
#shift_expression : shift_expression shift_operator additive_expression
#'''
#t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_relational_expression_01(t):
'''
relational_expression : additive_expression
'''
t[0]=t[1]
def p_relational_expression_02(t):
'''
relational_expression : relational_expression relational_operator additive_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_equality_expression_01(t):
'''
equality_expression : relational_expression
'''
t[0]=t[1]
def p_equality_expression_02(t):
'''
equality_expression : equality_expression equality_operator relational_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_and_expression_01(t):
'''
and_expression : equality_expression
'''
t[0]=t[1]
def p_and_expression_02(t):
'''
and_expression : and_expression AND equality_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_xor_expression_01(t):
'''
xor_expression : and_expression
'''
t[0]=t[1]
def p_xor_expression_02(t):
'''
xor_expression : xor_expression XOR and_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_or_expression_01(t):
'''
or_expression : xor_expression
| cin_bloc
'''
t[0]=t[1]
def p_or_expression_02(t):
'''
or_expression : or_expression OR xor_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_land_expression_01(t):
'''
land_expression : or_expression
'''
t[0]=t[1]
def p_land_expression_02(t):
'''
land_expression : land_expression LAND or_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_lor_expression_01(t):
'''
lor_expression : land_expression
'''
t[0]=t[1]
def p_lor_expression_02(t):
'''
lor_expression : lor_expression LOR land_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_assignment_expression_01(t):
'''
assignment_expression : lor_expression
'''
t[0]=t[1]
def p_assignment_expression_02(t): # a=b=3
'''
assignment_expression : reference_expression assignment_operator assignment_expression
'''
    t[0]=ast.AssignmentStatement(t[1],t[2],t[3]) # careful: chained assignments can get messy here - CHECK
def p_declaration_statement_01(t):
'''
declaration_statement : type declaration_list SEMI
'''
    # for each element of the declarator list, create a declaration node
for c in t[2].children:
c.type=t[1]
t[0]=t[2]
#def p_declaration_statement_02(t):
#'''
#declaration_statement : declaration_statement_init
#'''
## for each element of the declarator list, create a declaration node
#t[0]=t[1]
#def p_declaration_statement_init(t):
#'''
#declaration_statement_init : type declaration_list EQUALS initializer SEMI
#'''
## for each element of the declarator list, create a declaration node
#for c in t[2].children:
#c.type=t[1]
#c.init=t[4]
#t[0]=t[2]
#def p_declaration_statement_03(t):
# '''
# declaration_statement : struct ID LBRA RBRA
# '''
def p_declaration_list_01(t):
'''
declaration_list : declaration_list COMMA declaration
'''
t[1].add(t[3])
t[0]=t[1]
def p_declaration_list_02(t):
'''
declaration_list : declaration
'''
t[0]=ast.VariableDeclarationStatement(t[1])
def p_declaration_01(t):
'''
declaration : IDENTIFIER
'''
t[0]=ast.VariableDeclaration(t[1])
def p_declaration_02(t):
'''
declaration : IDENTIFIER EQUALS initializer
'''
t[0]=ast.VariableDeclaration(t[1])
t[0].init = t[3]
def p_declaration_03(t):
'''
declaration : IDENTIFIER LPAR actual_parameters_list RPAR
'''
t[0]=ast.VariableDeclaration(t[1])
t[0].params = t[3]
def p_declaration_04(t):
'''
declaration : IDENTIFIER LPAR RPAR
'''
t[0]=ast.VariableDeclaration(t[1])
t[0].cons = ast.ActualParametersList()
def p_initializer(t): # could be extended to support vectors
'''
initializer : lor_expression
'''
t[0]=t[1]
def p_assignment_statement(t):
'''
assignment_statement : assignment_expression SEMI
'''
t[0]=t[1]
def p_type_01 (t):
'''type : TYPEID'''
t[0] = ast.CustomType(t[1])
def p_type_02 (t):
'''type : VOID
| INT
| FLOAT
| DOUBLE
| CHAR
| BOOL
| STRING'''
t[0] = ast.Type(t[1])
def p_type_03 (t): # NOTE: produces ambiguity
'''type : CONST type'''
t[0] = t[2]
t[0].constant = True
def p_type_04 (t):
'''type : VECTOR LT type GT'''
t[0] = ast.VectorType(t[1], t[3])
def p_unary_operator(t):
'''
unary_operator : MINUS
| LNOT
'''
t[0]=t[1]
def p_multiplicative_operator(t):
'''
multiplicative_operator : MULT
| DIV
| MOD
'''
t[0]=t[1]
def p_additive_operator(t):
'''
additive_operator : PLUS
| MINUS
'''
t[0]=t[1]
def p_shift_operator(t):
'''
shift_operator : RPUT
| LPUT
'''
t[0]=t[1]
def p_relational_operator(t):
'''
relational_operator : GT
| LT
| LE
| GE
'''
t[0]=t[1]
def p_equality_operator(t):
'''
equality_operator : EQ
| NE
'''
t[0]=t[1]
def p_assignment_operator(t):
'''
assignment_operator : EQUALS
| MULTEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| ANDEQUAL
| OREQUAL
| XOREQUAL
| RIGHTSHIFTEQUAL
| LEFTSHIFTEQUAL
'''
t[0]=t[1]
def p_while_statement_01 (t):
'''while_statement : WHILE LPAR lor_expression RPAR statement'''
t[0] = ast.WhileStatement(t[3], t[5])
t[5].isStatement = True
def p_while_statement_02 (t):
'''while_statement : WHILE LPAR lor_expression RPAR SEMI'''
t[0] = ast.WhileStatement(t[3], ast.NullNode())
#def p_while_statement_cin (t):
#'''while_statement_cin : WHILE LPAR cin_bloc RPAR statement'''
#t[0] = ast.WhileStatementCin(t[3], t[5])
def p_for_statement (t):
'''for_statement : FOR LPAR assignment_statement assignment_statement assignment_expression RPAR statement'''
t[0] = ast.ForStatement(t[3], t[4], t[5], t[7])
t[7].isStatement = True
def p_for_statement_init (t):
'''for_statement : FOR LPAR declaration_statement assignment_statement assignment_expression RPAR statement'''
t[0] = ast.ForStatementInit(t[3], t[4], t[5], t[7])
t[7].isStatement = True
def p_if_statement_01 (t):
'''if_statement : IF LPAR assignment_expression RPAR statement'''
t[0] = ast.IfStatement(t[3], t[5])
t[5].isStatement = True
def p_if_statement_02(t):
'''if_statement : IF LPAR assignment_expression RPAR statement ELSE statement'''
t[0] = ast.IfStatement(t[3], t[5], t[7])
t[5].isStatement = True
t[7].isStatement = True
def p_return_statement_01 (t):
'''return_statement : RETURN assignment_statement'''
t[0] = ast.ReturnStatement(t[2])
def p_return_statement_02 (t):
'''return_statement : RETURN SEMI'''
t[0] = ast.ReturnStatement(None)
def p_actual_parameters_list_01 (t):
'''actual_parameters_list : empty'''
t[0] = ast.ActualParametersList()
def p_actual_parameters_list_02 (t):
'''actual_parameters_list : actual_parameter'''
t[0] = ast.ActualParametersList(t[1])
def p_actual_parameters_list_03 (t):
'''actual_parameters_list : actual_parameters_list COMMA actual_parameter'''
t[1].add(t[3])
t[0] = t[1]
def p_actual_parameter (t):
'''actual_parameter : assignment_expression'''
t[0] = t[1]
def p_error (t):
print 'Syntax error around line %d in token %s.' % (t.lineno, t.type)
yacc.errok()
#raise Exception('Syntax error around line %d in token %s.' % (t.lineno, t.type))
# Build the parser
parser = yacc.yacc()
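# Illustrative usage sketch (added note, not part of the original file): PLY's
# standard entry point is parser.parse(); it returns whatever p_start assigned
# to t[0], i.e. an ast.Program node. The companion `lexer` module (not shown
# here) is assumed to provide the token stream that yacc picks up by default.
#
#     with open("input.cpp") as f:
#         program_ast = parser.parse(f.read())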
"""
Scikit-Optimize, or `skopt`, is a simple and efficient library to
minimize (very) expensive and noisy black-box functions. It implements
several methods for sequential model-based optimization. `skopt` is reusable
in many contexts and accessible.
[![Build Status](https://travis-ci.org/scikit-optimize/scikit-optimize.svg?branch=master)](https://travis-ci.org/scikit-optimize/scikit-optimize)
## Install
```
pip install scikit-optimize
```
## Getting started
Find the minimum of the noisy function `f(x)` over the range `-2 < x < 2`
with `skopt`:
```python
import numpy as np
from skopt import gp_minimize
def f(x):
return (np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) *
np.random.randn() * 0.1)
res = gp_minimize(f, [(-2.0, 2.0)])
```
For more read our [introduction to bayesian optimization](https://scikit-optimize.github.io/notebooks/bayesian-optimization.html)
and the other [examples](https://github.com/scikit-optimize/scikit-optimize/tree/master/examples).
## Development
The library is still experimental and under heavy development.
The development version can be installed through:
git clone https://github.com/scikit-optimize/scikit-optimize.git
cd scikit-optimize
pip install -r requirements.txt
python setup.py develop
Run the tests by executing `pytest` in the top level directory.
"""
from . import acquisition
from . import benchmarks
from . import callbacks
from . import learning
from . import optimizer
from . import space
from .optimizer import dummy_minimize
from .optimizer import forest_minimize
from .optimizer import gbrt_minimize
from .optimizer import gp_minimize
from .optimizer import Optimizer
from .searchcv import BayesSearchCV
from .space import Space
from .utils import dump
from .utils import expected_minimum
from .utils import load
__version__ = "0.6"
__all__ = (
"acquisition",
"benchmarks",
"callbacks",
"learning",
"optimizer",
"plots",
"space",
"gp_minimize",
"dummy_minimize",
"forest_minimize",
"gbrt_minimize",
"Optimizer",
"dump",
"load",
"expected_minimum",
"BayesSearchCV",
"Space"
)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# daydayup_submit/daydayup_model.py
from daydayup_layer import GCN, My_APPNP, Cheb_Net, ARMA_Net, GAT_Net, SGC_Net, TAG_Net, DNA_Net
import torch
import torch.nn.functional as F
import lightgbm as lgb
from torch_geometric.nn import GCNConv, Node2Vec
from torch.nn import PReLU
from sklearn.linear_model import LogisticRegression
from torch.utils.data import DataLoader
import time
import xgboost as xgb
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from xgboost import XGBClassifier
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from collections import Counter
class GCNTrainer(object):
def __init__(self, data, lr=0.005, weight_decay=2e-4, epochs=700, features_num=16, num_class=2, num_layers=3, hidden=128):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.num_layers = num_layers
self.hidden = hidden
self.features_num = features_num
self.num_class = num_class
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = GCN(features_num=self.features_num, num_class=self.num_class, hidden=self.hidden, num_layers=self.num_layers)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class MyAPPNPTrainer(object):
def __init__(self, data, lr=0.005, weight_decay=2.5e-4, epochs=500):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = My_APPNP(num_features=self.features_num, num_class=self.num_class)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class ChebTrainer(object):
def __init__(self, data, lr=0.005, weight_decay=5e-4, epochs=600, features_num=16, num_class=2, hidden=64, num_hops=3, dropout=0.5):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.features_num = features_num
self.num_class = num_class
self.hidden = hidden
self.num_hops = num_hops
self.dropout = dropout
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = Cheb_Net(features_num=self.features_num, num_class=self.num_class, hidden=self.hidden, num_hops=self.num_hops, dropout=self.dropout)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class ARMATrainer(object):
def __init__(self, data, lr=0.027, weight_decay=5e-4, epochs=700, features_num=16, num_class=2, hidden=16, num_stacks=1, num_layers=1, shared_weights=True, dropout=0.5, skip_dropout=0.75):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.features_num = features_num
self.num_class = num_class
self.hidden = hidden
self.num_stacks = num_stacks
self.num_layers = num_layers
self.shared_weights = shared_weights
self.dropout = dropout
self.skip_dropout = skip_dropout
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = ARMA_Net(features_num=self.features_num, num_class=self.num_class, hidden=self.hidden, num_stacks=self.num_stacks, num_layers=self.num_layers, shared_weights=self.shared_weights, dropout=self.dropout, skip_dropout=self.skip_dropout)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class GATTrainer(object):
def __init__(self, data, lr=0.005, weight_decay=2e-4, epochs=600, features_num=16, num_class=2, hidden=16, heads=3, output_heads=1, concat=True, dropout=0.6):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.features_num = features_num
self.num_class = num_class
self.hidden = hidden
self.heads = heads
self.output_heads = output_heads
self.concat = concat
self.dropout = dropout
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = GAT_Net(features_num=self.features_num, num_class=self.num_class, hidden=self.hidden, heads=self.heads, output_heads=self.output_heads, concat=self.concat, dropout=self.dropout)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class SGCTrainer(object):
def __init__(self, data, lr=0.005, weight_decay=5e-4, epochs=800, features_num=16, num_class=2, K=3, cached=True):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.features_num = features_num
self.num_class = num_class
self.K = K
self.cached = cached
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = SGC_Net(features_num=self.features_num, num_class=self.num_class, K=self.K, cached=self.cached)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class TAGTrainer(object):
def __init__(self, data, lr=0.018, weight_decay=2e-4, epochs=500, features_num=16, num_class=2, hidden=16, dropout=0.5):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.features_num = features_num
self.num_class = num_class
self.hidden = hidden
self.dropout = dropout
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = TAG_Net(features_num=self.features_num, num_class=self.num_class, hidden=self.hidden, dropout=self.dropout)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class DNATrainer(object):
def __init__(self, data, lr=0.005, weight_decay=5e-4, epochs=500, features_num=16, num_class=2, num_layers=5, hidden=128, heads=4, groups=16 ,dropout=0.5):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.weight_decay = weight_decay
self.epochs = epochs
self.features_num = features_num
self.num_class = num_class
self.num_layers = num_layers
self.hidden = hidden
self.heads = heads
self.groups = groups
self.dropout = dropout
def train_nn(self):
self.features_num = self.data.x.size()[1]
self.num_class = int(max(self.data.y)) + 1
self.model = DNA_Net(features_num=self.features_num, num_class=self.num_class, num_layers=self.num_layers, hidden=self.hidden, heads=self.heads, groups=self.groups ,dropout=self.dropout)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
for epoch in range(1, self.epochs+1):
self.model.train()
self.optimizer.zero_grad()
loss = F.nll_loss(self.model(self.data)[self.data.train_mask], self.data.y[self.data.train_mask])
if epoch % 100 == 0:
print(epoch, loss)
loss.backward()
self.optimizer.step()
self.model.eval()
with torch.no_grad():
pred = self.model(self.data)[self.data.test_mask].max(1)[1]
return pred.cpu().numpy().flatten()
class N2VTrainer(object):
def __init__(self, data, lr=0.001, epochs=4):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data = data
self.lr = lr
self.epochs = epochs
def test(self, train_z, train_y, test_z, solver='lbfgs', multi_class='auto', *args, **kwargs):
clf = LogisticRegression(
solver=solver,
multi_class=multi_class,
*args,
**kwargs
)
clf.fit(train_z.detach().cpu().numpy(), train_y.detach().cpu().numpy())
pred = clf.predict(test_z.detach().cpu().numpy())
return pred
def train_nn(self):
self.model = Node2Vec(
self.data.num_nodes,
embedding_dim=128,
walk_length=20,
context_size=10,
walks_per_node=10
)
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
self.loader = DataLoader(torch.arange(self.data.num_nodes), batch_size=128, shuffle=True)
for epoch in range(1, self.epochs+1):
t1 = time.time()
self.model.train()
total_loss = 0
for subset in self.loader:
self.optimizer.zero_grad()
loss = self.model.loss(self.data.edge_index, subset.to(self.device))
loss.backward()
self.optimizer.step()
total_loss += loss.item()
total_loss = total_loss/len(self.loader)
print("epoch: %d, time elapsed: %.2f, loss: %.5f" % (epoch, time.time()-t1, total_loss))
self.model.eval()
with torch.no_grad():
z = self.model(torch.arange(self.data.num_nodes, device=self.device))
return z
class GBDTTrainer(object):
def __init__(self, train_x, train_y, test_x, n_class):
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.n_class = n_class
def train_nn(self):
clf = GradientBoostingClassifier(
loss="deviance", # "exponential"
learning_rate=0.1,
n_estimators=100,
criterion="friedman_mse",
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_depth=5,
min_impurity_decrease=0.,
min_impurity_split=1e-7,
subsample=1.0,
max_features=None, # "auto" "sqrt" "log2"
random_state=1234,
verbose=0,
max_leaf_nodes=None,
warm_start=False,
presort='auto',
validation_fraction=0.1,
n_iter_no_change=None,
tol=1e-4
)
clf.fit(self.train_x, self.train_y)
pred = clf.predict(self.test_x)
return pred
class XGBTrainer(object):
def __init__(self,
train_x,
train_y,
test_x,
n_class,
prob=False,
max_depth=5,
learning_rate=0.08,
n_estimators=100,
silent=True,
objective="multi:softmax",
booster='gbtree',
n_jobs=1,
nthread=None,
gamma=0,
min_child_weight=1,
max_delta_step=0,
subsample=0.6,
colsample_bytree=0.6,
colsample_bylevel=1,
reg_alpha=1.0,
reg_lambda=0.8,
scale_pos_weight=1,
base_score=0.5,
random_state=214,
seed=None,
missing=None):
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.n_class = n_class
self.prob = prob
self.max_depth = max_depth
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.silent = silent
self.objective = objective
self.booster = booster
self.n_jobs = n_jobs
self.nthread = nthread
self.gamma = gamma
self.min_child_weight = min_child_weight
self.max_delta_step = max_delta_step
self.subsample = subsample
self.colsample_bytree = colsample_bytree
self.colsample_bylevel = colsample_bylevel
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.scale_pos_weight = scale_pos_weight
self.base_score = base_score
self.random_state = random_state
self.seed = seed
self.missing = missing
def train_nn(self):
model = XGBClassifier(
max_depth=self.max_depth,
learning_rate=self.learning_rate,
n_estimators=self.n_estimators,
silent=self.silent,
objective=self.objective,
booster=self.booster,
n_jobs=self.n_jobs,
nthread=self.nthread,
gamma=self.gamma,
min_child_weight=self.min_child_weight,
max_delta_step=self.max_delta_step,
subsample=self.subsample,
colsample_bytree=self.colsample_bytree,
colsample_bylevel=self.colsample_bylevel,
reg_alpha=self.reg_alpha,
reg_lambda=self.reg_lambda,
scale_pos_weight=self.scale_pos_weight,
base_score=self.base_score,
random_state=self.random_state,
seed=self.seed,
missing=self.missing)
model.fit(self.train_x, self.train_y)
if self.prob:
pred = model.predict_proba(self.test_x)
else:
pred = model.predict(self.test_x)
return pred
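# Illustrative usage sketch (added note, not part of the original file). Each
# *Trainer above follows the same pattern: construct it with a torch_geometric
# Data object that already carries x, y, edge_index, train_mask and test_mask,
# then call train_nn() to obtain predictions for the masked test nodes.
#
#     trainer = GCNTrainer(data, epochs=300, hidden=64)
#     test_pred = trainer.train_nn()  # numpy array of predicted class ids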
""" a delay line using pre-parsed sax events
"""
from xml.sax import ContentHandler
class OneEventDelay(ContentHandler):
"pre-parse sax document then feed events to handler"
def __init__(self):
""
self.events = []
self.index = 0
def set_handler(self, handler):
self.handler = handler
self.handler.pre_parser = self
self.handler.read_ahead = self.read_ahead
def store(self, what, **kw):
"store events"
self.events.append((what, kw))
def handle(self, event):
"make our handler handle an event"
# this should handle iterators as gracefully as methods
fname, kw = event
fn = getattr(self.handler, fname)
return fn(**kw)
def read_ahead(self):
"return next-event-but-one"
index = self.index + 1
if index < len(self.events):
return self.events[index]
else:
return '', {}
def next(self):
"pass the next event to our handler"
if self.index < len(self.events):
res = self.handle(self.events[self.index])
self.index += 1
return res
else:
# reset the index for the next run through
self.index = 0
raise StopIteration
def handler_read_ahead(self):
"read event method for our handler"
return self.pre_parser.read_ahead()
# events
def startDocument(self):
""
self.store('startDocument')
def startElement(self, tag, atts):
""
self.store('startElement', tag=tag, atts=atts)
def characters(self, content):
""
self.store('characters', content=content)
def endElement(self, tag):
""
self.store('endElement', tag=tag)
def endDocument(self):
""
self.store('endDocument')
# be an iterator
def __iter__(self):
return self
class GeneratingHandler(ContentHandler):
"flow oriented handler"
# events
def startDocument(self):
""
self.store('startDocument')
def startElement(self, tag, atts):
""
self.store('startElement', tag=tag, atts=atts)
def characters(self, content):
""
self.store('characters', content=content)
def endElement(self, tag):
""
self.store('endElement', tag=tag)
def endDocument(self):
""
self.store('endDocument')
#### TEST ####
class PrintingContentHandler(ContentHandler):
"a content handler to print out events"
def show(self, event, **kw):
"show an event"
print event, kw
# events
def startDocument(self):
""
self.show('startDocument')
def startElement(self, tag, atts):
""
self.show('startElement', tag=tag, atts=atts)
def characters(self, content):
""
self.show('characters', content=content)
def endElement(self, tag):
""
self.show('endElement', tag=tag)
def endDocument(self):
""
self.show('endDocument')
# flush out event queue here
self.show('')
self.show('')
if __name__=='__main__':
from xml.sax import parse
handler = PrintingContentHandler()
    pre = OneEventDelay()
    pre.set_handler(handler)
    parse('test.xml', pre)
    # replay the stored events through the printing handler
    for _ in pre:
        pass
# qcengine/config.py
"""
Creates globals for the qcengine module
"""
import fnmatch
import getpass
import logging
import os
import socket
from typing import Any, Dict, Optional, Union
import pydantic
from .extras import get_information
__all__ = ["get_config", "get_provenance_augments", "global_repr", "NodeDescriptor"]
# Start a globals dictionary with small starting values
_global_values = None
NODE_DESCRIPTORS = {}
LOGGER = logging.getLogger("QCEngine")
LOGGER.setLevel(logging.CRITICAL)
# Generic globals
def get_global(key: Optional[str] = None) -> Union[str, Dict[str, Any]]:
import cpuinfo
import psutil
global _global_values
if _global_values is None:
_global_values = {}
_global_values["hostname"] = socket.gethostname()
_global_values["memory"] = round(psutil.virtual_memory().available / (1024**3), 3)
_global_values["username"] = getpass.getuser()
# Work through VMs and logical cores.
if hasattr(psutil.Process(), "cpu_affinity"):
cpu_cnt = len(psutil.Process().cpu_affinity())
else:
cpu_cnt = psutil.cpu_count(logical=False)
if cpu_cnt is None:
cpu_cnt = psutil.cpu_count(logical=True)
_global_values["ncores"] = cpu_cnt
_global_values["cpuinfo"] = cpuinfo.get_cpu_info()
_global_values["cpu_brand"] = _global_values["cpuinfo"]["brand"]
if key is None:
return _global_values.copy()
else:
return _global_values[key]
class NodeDescriptor(pydantic.BaseModel):
"""
Description of an individual node
"""
# Host data
hostname_pattern: str
name: str
scratch_directory: Optional[str] = None # What location to use as scratch
memory: Optional[float] = None
memory_safety_factor: int = 10 # Percentage of memory as a safety factor
# Specifications
ncores: Optional[int] = None
jobs_per_node: int = 2
retries: int = 0
def __init__(self, **data: Dict[str, Any]) -> 'BaseModel':
data = parse_environment(data)
super().__init__(**data)
class Config:
extra = "forbid"
class JobConfig(pydantic.BaseModel):
# Specifications
ncores: int # Number of ncores per job
memory: float # Amount of memory in GiB per node
scratch_directory: Optional[str] # What location to use as scratch
retries: int # Number of retries on random failures
class Config:
extra = "forbid"
def _load_defaults() -> None:
"""
Pulls the defaults from the QCA folder
"""
# Find the config
load_path = None
test_paths = [os.getcwd(), os.path.join(os.path.expanduser('~'), ".qcarchive")]
if "DQM_CONFIG_PATH" in os.environ:
test_paths.insert(0, os.environ["DQM_CONFIG_PATH"])
for path in test_paths:
path = os.path.join(path, "qcengine.yaml")
if os.path.exists(path):
load_path = path
break
if load_path is None:
LOGGER.info("Could not find 'qcengine.yaml'. Searched the following paths: {}".format(", ".join(test_paths)))
LOGGER.info("Using default options...")
else:
import yaml
LOGGER.info("Found 'qcengine.yaml' at path: {}".format(load_path))
with open(load_path, "r") as stream:
user_config = yaml.load(stream)
for k, v in user_config.items():
NODE_DESCRIPTORS[k] = NodeDescriptor(name=k, **v)
# Pull in the local variables
_load_defaults()
def global_repr() -> str:
"""
A representation of the current global configuration.
"""
ret = ""
ret += "Host information:\n"
ret += "-" * 80 + "\n"
prov = get_provenance_augments()
for k in ["username", "hostname", "cpu"]:
ret += "{:<30} {:<30}\n".format(k, prov[k])
ret += "\nNode information:\n"
ret += "-" * 80 + "\n"
for k, v in get_node_descriptor():
ret += " {:<28} {}\n".format(k, v)
if k in ["scratch_directory", "memory_per_job"]:
ret += "\n"
ret += "\nJob information:\n"
ret += "-" * 80 + "\n"
for k, v in get_config():
ret += " {:<28} {}\n".format(k, v)
ret += "-" * 80 + "\n"
return ret
def get_node_descriptor(hostname: Optional[str] = None) -> NodeDescriptor:
"""
Find the correct NodeDescriptor based off current hostname
"""
if isinstance(hostname, NodeDescriptor):
return hostname
if hostname is None:
hostname = get_global("hostname")
# Find a match
for name, node in NODE_DESCRIPTORS.items():
if fnmatch.fnmatch(hostname, node.hostname_pattern):
config = node
break
else:
config = NodeDescriptor(name="default",
hostname_pattern="*",
memory=get_global("memory"),
ncores=get_global("ncores"))
return config
def parse_environment(data: Dict[str, Any]) -> Dict[str, Any]:
"""
Parses a dictionary looking for environmental variables
"""
ret = {}
for k, var in data.items():
if isinstance(var, str) and var.startswith("$"):
var = var.replace("$", "", 1)
if var in os.environ:
var = os.environ[var]
else:
var = None
ret[k] = var
return ret
def get_config(*, hostname: Optional[str] = None, local_options: Dict[str, Any] = None) -> JobConfig:
"""
Returns the configuration key for qcengine.
"""
if local_options is None:
local_options = {}
local_options = parse_environment(local_options)
config = {}
# Node data
node = get_node_descriptor(hostname)
ncores = node.ncores or get_global("ncores")
config["scratch_directory"] = local_options.pop("scratch_directory", node.scratch_directory)
config["retries"] = local_options.pop("retries", node.retries)
# Jobs per node
jobs_per_node = local_options.pop("jobs_per_node", None) or node.jobs_per_node
# Handle memory
memory = local_options.pop("memory", None)
if memory is None:
memory = node.memory or get_global("memory")
memory_coeff = (1 - node.memory_safety_factor / 100)
memory = round(memory * memory_coeff / jobs_per_node, 3)
config["memory"] = memory
# Handle ncores
ncores = local_options.pop("ncores", int(ncores / jobs_per_node))
if ncores < 1:
raise KeyError("Number of jobs per node exceeds the number of available cores.")
config["ncores"] = ncores
if local_options is not None:
config.update(local_options)
return JobConfig(**config)
def get_provenance_augments() -> Dict[str, str]:
return {
"cpu": get_global("cpu_brand"),
"hostname": get_global("hostname"),
"username": get_global("username"),
"qcengine_version": get_information("version")
}
def get_logger() -> 'Logger':
return LOGGER
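# Illustrative usage sketch (added note, not part of the original module).
# get_config() merges the matching NodeDescriptor's defaults with per-call
# overrides, and any "$VAR" string in local_options is resolved from the
# environment by parse_environment() before the JobConfig is built:
#
#     job = get_config(local_options={"ncores": 2, "memory": 4.0,
#                                     "scratch_directory": "$TMPDIR"})
#     print(job.ncores, job.memory, job.scratch_directory)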
from __future__ import annotations
from dataclasses import dataclass
from pymonkey import token
TOKENMAPS = {
";": token.Token(token.SEMICOLON, ";"),
"(": token.Token(token.LPAREN, "("),
")": token.Token(token.RPAREN, ")"),
",": token.Token(token.COMMA, ","),
"{": token.Token(token.LBRACE, "{"),
"}": token.Token(token.RBRACE, "}"),
"+": token.Token(token.PLUS, "+"),
"-": token.Token(token.MINUS, "-"),
"*": token.Token(token.ASTERISK, "*"),
"/": token.Token(token.SLASH, "/"),
">": token.Token(token.GT, ">"),
"<": token.Token(token.LT, "<"),
"": token.Token(token.EOF, ""),
}
KEYWORDS = {
"fn": token.FUNCTION,
"let": token.LET,
"if": token.IF,
"else": token.ELSE,
"return": token.RETURN,
"true": token.TRUE,
"false": token.FALSE,
}
def new(_input: str) -> Lexer:
l = Lexer(_input)
l.read_char()
return l
def is_letter(ch: str) -> bool:
return "a" <= ch <= "z" or "A" <= ch <= "Z" or ch == "_"
def is_digit(ch: str) -> bool:
return "0" <= ch <= "9"
@dataclass
class Lexer:
_input: str = ""
_position: int = 0
_read_position: int = 0
_ch: str = ""
def read_char(self):
if self._read_position >= len(self._input):
self._ch = ""
else:
self._ch = self._input[self._read_position]
self._position = self._read_position
self._read_position += 1
def next_token(self) -> token.Token:
self.skip_whitespace()
ch = self._ch
if ch == "=":
if self.peek_char() == "=":
self.read_char()
tok = token.Token(token.EQ, "==")
else:
tok = token.Token(token.ASSIGN, "=")
elif ch == "!":
if self.peek_char() == "=":
self.read_char()
tok = token.Token(token.NOT_EQ, "!=")
else:
tok = token.Token(token.BANG, "!")
else:
tok = TOKENMAPS.get(ch)
if tok is None:
if is_letter(ch):
_literal = self.read_identifier()
tok = token.Token(KEYWORDS.get(_literal, token.IDENT), _literal)
return tok
elif is_digit(ch):
tok = token.Token(token.INT, self.read_number())
return tok
else:
tok = token.Token(token.ILLEGAL, ch)
self.read_char()
return tok
def skip_whitespace(self):
while any(
[self._ch == " ", self._ch == "\t", self._ch == "\n", self._ch == "\r"]
):
self.read_char()
def read_identifier(self) -> str:
pos = self._position
while is_letter(self._ch):
self.read_char()
return self._input[pos : self._position]
def read_number(self) -> str:
pos = self._position
while is_digit(self._ch):
self.read_char()
return self._input[pos : self._position]
def peek_char(self) -> str:
if self._read_position >= len(self._input):
return token.EOF
else:
return self._input[self._read_position]
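# Illustrative usage sketch (added note, not part of the original file): pull
# tokens until the EOF token appears. The attribute used to read the token type
# below is an assumption about how pymonkey's token.Token is defined.
#
#     l = new("let five = 5;")
#     tok = l.next_token()
#     while tok.type != token.EOF:  # field name assumed
#         print(tok)
#         tok = l.next_token()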
#-----------------------------------------------------------------------------
# training script
#-----------------------------------------------------------------------------
import torch, torchvision, torch.nn.functional as F
import argparse
from tqdm import tqdm
from pathlib import Path
from Model import DeepInfoMaxLoss
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 256
num_epochs = 1000
num_workers = 4
save_interval = 100
version = "cifar10_v2"
lr = 1e-4
# image size (3,32,32)
# batch size must be an even number
# shuffle must be True
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
train_dataset = torchvision.datasets.cifar.CIFAR10("~/.torch/", download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers)
dim = DeepInfoMaxLoss(alpha=0.5, beta=1.0, gamma=0.1).to(device)
optimizer = torch.optim.Adam(dim.parameters(), lr=lr)
dim.train()
for epoch in range(1, num_epochs+1):
Batch = tqdm(train_loader, total=len(train_dataset) // batch_size)
for i, (data, target) in enumerate(Batch, 1):
data = data.to(device)
Y, M = dim.encoder(data)
# shuffle batch to pair each element with another
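            # (rolling the batch by one pairs each image's global vector Y with a
            # local feature map from a different image; these mismatched pairs act
            # as the negative samples for the mutual-information objective)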
M_fake = torch.cat((M[1:], M[0].unsqueeze(0)), dim=0)
loss = dim(Y, M, M_fake)
Batch.set_description(f"[{epoch:>3d}/{num_epochs:<3d}]Loss/Train: {loss.item():1.5e}")
dim.zero_grad()
loss.backward()
optimizer.step()
# checkpoint and save models
if epoch % save_interval == 0:
file = Path(f"./Models/{version}/checkpoint_epoch_{epoch}.pkl")
file.parent.mkdir(parents=True, exist_ok=True)
torch.save(dim.state_dict(), str(file))
# src/study/fashion_mnist/mnist_fashion_classifier.py
from src import network
import numpy as np
from src.study.utils import downloader
from src.study.mnist_common import mnist_reader
items = ["T-shirt/top","Trouser","Pullover","Dress","Coat","Sandal","Shirt","Sneaker","Bag","Ankle","boot"]
def download_mnist_fashion_data():
downloader.download_data("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com",
["train-images-idx3-ubyte.gz",
"train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz"])
download_mnist_fashion_data()
def train_model():
net = network.Network([784,30,10])
training_data,test_data = mnist_reader.load()
net.train("mnists_fashion_classifier.learnings", training_data, epochs=70, mini_batch_size=4, eta=0.01,
test_data=test_data)
return net
def print_result(actual,expected):
print("%s is detected as %s" % (items[expected],items[actual]))
def evaluate(net):
training_data, test_data = mnist_reader.load()
for test_sample in test_data[9000:]:
print_result(np.argmax(net.feedforward(test_sample[0])),test_sample[1])
if(__name__ == "__main__"):
    evaluate(train_model())
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_checkers_swig')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_checkers_swig')
_checkers_swig = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_checkers_swig', [dirname(__file__)])
except ImportError:
import _checkers_swig
return _checkers_swig
try:
_mod = imp.load_module('_checkers_swig', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_checkers_swig = swig_import_helper()
del swig_import_helper
else:
import _checkers_swig
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _checkers_swig.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _checkers_swig.SwigPyIterator_value(self)
def incr(self, n=1):
return _checkers_swig.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _checkers_swig.SwigPyIterator_decr(self, n)
def distance(self, x):
return _checkers_swig.SwigPyIterator_distance(self, x)
def equal(self, x):
return _checkers_swig.SwigPyIterator_equal(self, x)
def copy(self):
return _checkers_swig.SwigPyIterator_copy(self)
def next(self):
return _checkers_swig.SwigPyIterator_next(self)
def __next__(self):
return _checkers_swig.SwigPyIterator___next__(self)
def previous(self):
return _checkers_swig.SwigPyIterator_previous(self)
def advance(self, n):
return _checkers_swig.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _checkers_swig.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _checkers_swig.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _checkers_swig.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _checkers_swig.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _checkers_swig.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _checkers_swig.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _checkers_swig.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
CellStatus__None = _checkers_swig.CellStatus__None
CellStatus_Black = _checkers_swig.CellStatus_Black
CellStatus_White = _checkers_swig.CellStatus_White
CellStatus_BlackQueen = _checkers_swig.CellStatus_BlackQueen
CellStatus_WhiteQueen = _checkers_swig.CellStatus_WhiteQueen
CellStatus_Forbidden = _checkers_swig.CellStatus_Forbidden
Team__None = _checkers_swig.Team__None
Team_Black = _checkers_swig.Team_Black
Team_White = _checkers_swig.Team_White
def TeamOfCell(status):
return _checkers_swig.TeamOfCell(status)
TeamOfCell = _checkers_swig.TeamOfCell
def Opponent(team):
return _checkers_swig.Opponent(team)
Opponent = _checkers_swig.Opponent
def IsQueen(cell):
return _checkers_swig.IsQueen(cell)
IsQueen = _checkers_swig.IsQueen
class GameState(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GameState, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GameState, name)
__repr__ = _swig_repr
__swig_setmethods__["State"] = _checkers_swig.GameState_State_set
__swig_getmethods__["State"] = _checkers_swig.GameState_State_get
if _newclass:
State = _swig_property(_checkers_swig.GameState_State_get, _checkers_swig.GameState_State_set)
__swig_setmethods__["Parent"] = _checkers_swig.GameState_Parent_set
__swig_getmethods__["Parent"] = _checkers_swig.GameState_Parent_get
if _newclass:
Parent = _swig_property(_checkers_swig.GameState_Parent_get, _checkers_swig.GameState_Parent_set)
__swig_setmethods__["CurrentTeam"] = _checkers_swig.GameState_CurrentTeam_set
__swig_getmethods__["CurrentTeam"] = _checkers_swig.GameState_CurrentTeam_get
if _newclass:
CurrentTeam = _swig_property(_checkers_swig.GameState_CurrentTeam_get, _checkers_swig.GameState_CurrentTeam_set)
def __init__(self, *args):
this = _checkers_swig.new_GameState(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
if _newclass:
CreateEmpty = staticmethod(_checkers_swig.GameState_CreateEmpty)
else:
CreateEmpty = _checkers_swig.GameState_CreateEmpty
def Equal(self, other):
return _checkers_swig.GameState_Equal(self, other)
def Hash(self):
return _checkers_swig.GameState_Hash(self)
if _newclass:
Index = staticmethod(_checkers_swig.GameState_Index)
else:
Index = _checkers_swig.GameState_Index
def IsTerminal(self):
return _checkers_swig.GameState_IsTerminal(self)
def At(self, i, j):
return _checkers_swig.GameState_At(self, i, j)
def Cell(self, i, j):
return _checkers_swig.GameState_Cell(self, i, j)
def KillingMovesFor(self, team):
return _checkers_swig.GameState_KillingMovesFor(self, team)
def NonKillingMovesFor(self, team):
return _checkers_swig.GameState_NonKillingMovesFor(self, team)
def KillingMovesForQueen(self, i, j):
return _checkers_swig.GameState_KillingMovesForQueen(self, i, j)
def KillingMovesForRegular(self, i, j):
return _checkers_swig.GameState_KillingMovesForRegular(self, i, j)
def NonKillingMovesForQueen(self, i, j):
return _checkers_swig.GameState_NonKillingMovesForQueen(self, i, j)
def NonKillingMovesForRegular(self, i, j):
return _checkers_swig.GameState_NonKillingMovesForRegular(self, i, j)
def Expand(self):
return _checkers_swig.GameState_Expand(self)
def Dump(self, stream):
return _checkers_swig.GameState_Dump(self, stream)
def __eq__(self, other):
return _checkers_swig.GameState___eq__(self, other)
__swig_destroy__ = _checkers_swig.delete_GameState
__del__ = lambda self: None
GameState_swigregister = _checkers_swig.GameState_swigregister
GameState_swigregister(GameState)
cvar = _checkers_swig.cvar
BoardSize = cvar.BoardSize
def GameState_CreateEmpty():
return _checkers_swig.GameState_CreateEmpty()
GameState_CreateEmpty = _checkers_swig.GameState_CreateEmpty
def GameState_Index(i, j):
return _checkers_swig.GameState_Index(i, j)
GameState_Index = _checkers_swig.GameState_Index
# This file is compatible with both classic and new-style classes.
| StarcoderdataPython |
1685284 | from invoke import task
@task
def start(ctx, docs=False):
ctx.run('FLASK_APP=toes_app.py FLASK_DEBUG=1 venv/bin/flask run --host=0.0.0.0')
@task
def update_dev(ctx, docs=False):
ctx.run('zappa update dev')
@task
def update_prod(ctx, docs=False):
ctx.run('zappa update prod')
@task
def tail(ctx, docs=False):
ctx.run('zappa tail prod --since 1m')
| StarcoderdataPython |
60834 | <gh_stars>1-10
# full assembly of the sub-parts to form the complete net
import torch.nn.functional as F
from .unet_parts import *
# from .mobilenet2 import *
def debug(str):
if False:
print(str)
class UNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 32)
self.down1 = down(32, 64)
self.down2 = down(64, 128)
self.down3 = down(128, 256)
self.down4 = down(256, 512)
self.down5 = down(512, 512)
self.up0 = up(1024, 256, bilinear=False)
self.up1 = up(512, 128,bilinear = False)
self.up2 = up(256, 64,bilinear = False)
self.up3 = up(128, 32,bilinear = False)
self.up4 = up(64, 16,bilinear = False)
self.outc1 = outconv(16, n_classes)
def forward(self, x):
x1 = self.inc(x)
debug('x1 shape is {}'.format(x1.shape))
x2 = self.down1(x1)
debug('x2 shape is {}'.format(x2.shape))
x3 = self.down2(x2)
debug('x3 shape is {}'.format(x3.shape))
x4 = self.down3(x3)
debug('x4 shape is {}'.format(x4.shape))
x5 = self.down4(x4)
debug('x5 shape is {}'.format(x5.shape))
x6 = self.down5(x5)
debug('x6 shape is {}'.format(x6.shape))
x = self.up0(x6, x5)
x = self.up1(x, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc1(x)
return x | StarcoderdataPython |
3300114 | #!/usr/bin/env python3
import sys
import os
import pprint
import matplotlib.pyplot as plt
import bloat_test
def rounded_kb(bytes):
x = round(bytes / 1024, 1)
if x >= 100:
return int(x)
return x
def get_test_prettyname(test):
names = {
'bloat-control': 'empty',
'bloat-scanf': '`std::scanf`',
'bloat-iostream': '`std::istream`',
'bloat-scnlib': '`scn::input`',
'bloat-scnlib-header-only': '`scn::input` (header-only)',
'bloat-scnlib-value': '`scn::scan_value`',
'bloat-scnlib-value-header-only': '`scn::scan_value` (header-only)',
}
return names[test]
def process_test(exec_dir, test):
size, stripped = bloat_test.run(os.path.join(exec_dir, test))
return {'test': test, 'testPretty': get_test_prettyname(test).replace("`", ""),
'testPrettyMD': get_test_prettyname(test),
'size': size, 'stripped': stripped,
'sizeH': bloat_test.convert_size(size), 'strippedH': bloat_test.convert_size(stripped),
'sizeKiB': rounded_kb(size), 'strippedKiB': rounded_kb(stripped)}
def make_plot(results, title):
results = list(reversed(results))
names = list(map(lambda x: x['testPretty'].replace(' ', '\n'), results))
plt.figure(figsize=(10, 10.5))
plt.suptitle(f'Executable size benchmarks: {title}')
a = plt.subplot(211)
    a.barh(names, list(map(lambda x: x['sizeKiB'], results)))
a.set_title('Executable size')
plt.xlabel('Size in KiB')
b = plt.subplot(212)
    b.barh(names, list(map(lambda x: x['strippedKiB'], results)))
b.set_title('Stripped size')
plt.xlabel('Size in KiB')
plt.show()
def main():
exec_dir = sys.argv[1]
title = sys.argv[2]
pp = pprint.PrettyPrinter(indent=4)
tests = [f for f
in os.listdir(exec_dir)
if os.path.isfile(os.path.join(exec_dir, f))
and f.startswith('bloat-')
and not f.endswith('stripped')
and not f.endswith('.py')]
presort_results = {item['test']: item for item in list(map(lambda test: process_test(exec_dir, test), tests))}
results = [
presort_results['bloat-control'],
presort_results['bloat-scanf'],
presort_results['bloat-iostream'],
presort_results['bloat-scnlib'],
presort_results['bloat-scnlib-header-only'],
presort_results['bloat-scnlib-value'],
presort_results['bloat-scnlib-value-header-only']
]
first_column_width = len(max(results, key=lambda x: len(x['testPrettyMD']))['testPrettyMD'])
execsize_txt = 'Executable size'
stripsize_txt = 'Stripped size'
print('Full results pretty-printed')
pp.pprint(results)
print('\n')
print('Formatted as markdown table (sorted by size, asc)')
print(f'| {"Method":{first_column_width}} | {execsize_txt} | {stripsize_txt} |')
print(f'| {":":-<{first_column_width}} | {":":->{len(execsize_txt)}} | {":":->{len(stripsize_txt)}} |')
for result in results:
print(
f'| {result["testPrettyMD"]:{first_column_width}} | {result["sizeKiB"]:>{len(execsize_txt)}} | {result["strippedKiB"]:>{len(stripsize_txt)}} |')
print('\n')
make_plot(results, title)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4832930 | <reponame>pagotti/dasladen
"""
Driver Module
Oracle Driver External Dependencies:
- cx_oracle: (c) 2016, 2017, Oracle and/or its affiliates.
Portions Copyright 2007-2015, <NAME>.
Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta, Canada.
BSD License (https://github.com/oracle/python-cx_Oracle/blob/master/LICENSE.txt)
MySQL Driver External Dependencies:
- PyMySQL: (c) 2010, 2013 PyMySQL contributors - MIT License (https://pypi.python.org/pypi/PyMySQL/0.7.1)
MSSQL Driver (via ODBC) External Dependencies:
- pyodbc: (c) <NAME> - MIT License (https://github.com/mkleehammer/pyodbc)
Postgres Driver External Dependencies:
- psycopg2: (c) <NAME>, <NAME>, <NAME> - LGPL License (https://github.com/psycopg/psycopg2)
Features:
- Connection to MS SQL
- Connection to MySQL
- Connection to Oracle
- Connection to PostgreSQL
"""
import os
from . import compat
try:
import cx_Oracle as oracle
except ImportError:
pass
try:
import pyodbc as odbc
except ImportError:
pass
try:
import pymysql as mysql
except ImportError:
pass
try:
import psycopg2 as postgres
import psycopg2.extras as postres_extras
except ImportError:
pass
def get_env(value):
"""Return the [VAR] environment variable if starts with $env.[VAR]"""
if len(value) > 5 and value.startswith("$env."):
return os.environ[value[5:]]
else:
return value
class CursorProxy(object):
"""Proxy for cursor that not has support a executemany with iterators"""
def __init__(self, cursor):
self._cursor = cursor
def executemany(self, statement, parameters, **kwargs):
# convert parameters to a list
parameters = list(parameters)
# pass through to proxy cursor
return self._cursor.executemany(statement, parameters, **kwargs)
def __getattr__(self, item):
return getattr(self._cursor, item)
class OracleDriver(object):
"""Driver for Oracle connections"""
def __init__(self, config):
self.config = config
def get_db(self):
conn = self.config
host_address = conn.get("host", "localhost")
port = conn.get("port", "1521")
str_conn = "{}/{}@{}:{}/{}".format(get_env(conn["user"]), get_env(conn["pass"]),
host_address, port, conn["service"])
db = oracle.connect(str_conn)
db.outputtypehandler = self.output_type_handler
if "initializing" in conn:
for sql in conn["initializing"]:
db.cursor().execute(sql)
return db
def output_type_handler(self, cursor, name, defaultType, size, precision, scale):
        if defaultType in (oracle.STRING, oracle.FIXED_CHAR):
            if compat.PY2:
                return cursor.var(unicode, size, cursor.arraysize)
            else:
                return cursor.var(oracle.STRING, size, cursor.arraysize)
def cursor(self, db):
return CursorProxy(db.cursor())
class MSSQLDriver(object):
"""Driver for MS SQL connections via ODBC"""
def __init__(self, config):
self.config = config
def get_db(self):
conn = self.config
db_charset = "CHARSET={};".format(conn["charset"]) if "charset" in conn else ""
host_address = conn.get("host", "(local)")
port = conn.get("port", "1433")
if not conn["user"]:
str_conn = ("DRIVER={{SQL Server}};SERVER={};"
"PORT={};DATABASE={};Trusted_Connection=yes;{}").format(
host_address, port,
conn["database"],
db_charset)
else:
str_conn = ("DRIVER={{SQL Server}};SERVER={};"
"PORT={};DATABASE={};UID={};PWD={};{}").format(
host_address, port,
conn["database"],
get_env(conn["user"]),
get_env(conn["pass"]),
db_charset)
db = odbc.connect(str_conn)
if "initializing" in conn:
for sql in conn["initializing"]:
db.cursor().execute(sql)
return db
def cursor(self, db):
return db.cursor()
class MySQLDriver(object):
"""Driver for MySQL connections"""
def __init__(self, config):
self.config = config
def get_db(self):
conn = self.config
db_charset = conn.get("charset", "utf8")
host_address = conn.get("host", "localhost")
port = conn.get("port", 3306)
db = mysql.connect(host=host_address,
port=port,
user=get_env(conn["user"]),
                           password=get_env(conn["pass"]),
database=conn["database"],
charset=db_charset,
local_infile=1)
# needed for petl work correctly
db.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
if "initializing" in conn:
for sql in conn["initializing"]:
db.cursor().execute(sql)
return db
def cursor(self, db):
return db.cursor()
class PostgreBatchCursor():
"""Proxy that bypass executemany and run execute_batch on psycopg2 """
def __init__(self, cursor):
self._cursor = cursor
def executemany(self, statement, parameters, **kwargs):
return postres_extras.execute_batch(self._cursor, statement, parameters, **kwargs)
def __getattr__(self, item):
return getattr(self._cursor, item)
class PostgreSQLDriver(object):
"""Driver for PostgreSQL connections"""
def __init__(self, config):
self.config = config
def get_db(self):
conn = self.config
db_charset = conn.get("charset", "utf8")
host_address = conn.get("host", "localhost")
port = conn.get("port", 5432)
db = postgres.connect(host=host_address,
port=port,
user=get_env(conn["user"]),
                              password=get_env(conn["pass"]),
database=conn["database"],
client_encoding=db_charset)
if "initializing" in conn:
for sql in conn["initializing"]:
db.cursor().execute(sql)
return db
def cursor(self, db):
return PostgreBatchCursor(db.cursor())
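

# Minimal usage sketch (not part of the original module): the config keys below
# ("host", "port", "user", "pass", "database", "charset") mirror what the drivers
# read above; the concrete values and the DB_USER/DB_PASS environment variable
# names are illustrative assumptions only.
def _example_mysql_usage():
    config = {
        "host": "localhost",
        "port": 3306,
        "user": "$env.DB_USER",    # resolved by get_env() when get_db() runs
        "pass": "$env.DB_PASS",
        "database": "example_db",
        "charset": "utf8",
    }
    driver = MySQLDriver(config)
    db = driver.get_db()
    cursor = driver.cursor(db)
    cursor.execute("SELECT 1")
    return cursor.fetchone()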
| StarcoderdataPython |
4823445 | <reponame>tankishev/Python
# Until the "Sail" command is given, you will be receiving:
# • You and your crew have targeted cities, with their population and gold, separated by "||".
# • If you receive a city that has already been received, you have to increase the population and gold with the given values.
# After the "Sail" command, you will start receiving lines of text representing events until the "End" command is given.
# Events will be in the following format:
# • "Plunder=>{town}=>{people}=>{gold}"
# o You have successfully attacked and plundered the town, killing the given number of people and stealing the respective amount of gold.
# o For every town you attack print this message: "{town} plundered! {gold} gold stolen, {people} citizens killed."
# o If any of those two values (population or gold) reaches zero, the town is disbanded.
# You need to remove it from your collection of targeted cities and print the following message: "{town} has been wiped off the map!"
# o There will be no case of receiving more people or gold than there is in the city.
# • "Prosper=>{town}=>{gold}"
# o There has been dramatic economic growth in the given city, increasing its treasury by the given amount of gold.
# o The gold amount can be a negative number, so be careful. If a negative amount of gold is given, print: "Gold added cannot be a negative number!" and ignore the command.
# o If the given gold is a valid amount, increase the town's gold reserves by the respective amount and print the following message:
# "{gold added} gold added to the city treasury. {town} now has {total gold} gold."
# Input
# • On the first lines, until the "Sail" command, you will be receiving strings representing the cities with their gold and population, separated by "||"
# • On the following lines, until the "End" command, you will be receiving strings representing the actions described above, separated by "=>"
# Output
# • After receiving the "End" command, if there are any existing settlements on your list of targets, you need to print all of them, in the following format:
# "Ahoy, Captain! There are {count} wealthy settlements to go to:
# {town1} -> Population: {people} citizens, Gold: {gold} kg
# {town2} -> Population: {people} citizens, Gold: {gold} kg
# …
# {town…n} -> Population: {people} citizens, Gold: {gold} kg"
# • If there are no settlements left to plunder, print:
# "Ahoy, Captain! All targets have been plundered and destroyed!"
targets = {}
while True:
input_line = input()
if input_line == 'Sail':
break
town, pop, gold = input_line.split('||')
if town in targets:
targets[town]['pop'] += int(pop)
targets[town]['gold'] += int(gold)
else:
targets[town] = {'pop': int(pop), 'gold': int(gold)}
while True:
input_line = input()
if input_line == 'End':
break
tokens = input_line.split('=>')
command, town = tokens[0], tokens[1]
if command == 'Plunder':
people, gold = int(tokens[2]), int(tokens[3])
        kills = min(people, targets[town]['pop'])
        gold_plundered = min(gold, targets[town]['gold'])
        targets[town]['pop'] -= kills
        targets[town]['gold'] -= gold_plundered
        print(f"{town} plundered! {gold_plundered} gold stolen, {kills} citizens killed.")
if targets[town]['gold'] <= 0 or targets[town]['pop'] <= 0:
targets.pop(town)
print(f"{town} has been wiped off the map!")
elif command == 'Prosper':
gold = int(tokens[2])
if gold < 0:
print('Gold added cannot be a negative number!')
else:
targets[town]['gold'] += gold
print(f"{gold} gold added to the city treasury. {town} now has {targets[town]['gold']} gold.")
if len(targets) == 0:
print("Ahoy, Captain! All targets have been plundered and destroyed!")
else:
print(f"Ahoy, Captain! There are {len(targets)} wealthy settlements to go to:")
output = ['{0} -> Population: {1} citizens, Gold: {2} kg'.format(town, stats['pop'], stats['gold']) for town, stats in targets.items()]
print(*output, sep='\n')
| StarcoderdataPython |
1643667 | import GPy
import scipy.optimize
import scipy.special
import scipy.stats as stats
from scipy.spatial.kdtree import KDTree
from general.utils import *
class Sensor:
"""
    Sensor model is designed to compute the probability of getting a certain observation at a certain position.
    In this implementation, a Gaussian process with a path loss model as the mean function is used.
"""
# General
X_ = None # Position data matrix: expected to be n x 2
Z_ = None # RSS reading matrix: expected to be n x m
n_ap_ = None # Literally: the number of access points, positive integer
# Path loss parameters
path_loss_params_ = None # The parameters used to compute path loss model, expected to be 4 x n_ap_
epsilon_ = 1e-3 # The term used to prevent distance evaluation between reference positions and access point positions from zero
penalty_factor_ = 0.02 # The term used to penalize "bad" path loss predictions
delta_ = 0.01 # Small interval for computing probability
weight_scale_ = 2 # The scaling factor used to supress low likelihood
# GP
GP_ = None # Gaussian process model
    def __init__(self):
"""
Initializer
"""
return None
def setData(self, X, Z, verbose=False):
"""
Set the data and parameters
Args:
X: n x 2 array, position data
Z: n x m array, normalized rss reading
verbose: Boolean, True to display execution details
"""
assert X.shape[1] == 2
assert X.shape[0] == Z.shape[0]
self.X_ = X
self.Z_ = Z
self.n_ap_ = Z.shape[1]
self.path_loss_params_ = np.zeros((4, self.n_ap_))
if verbose:
print("The position data has dimension of:\n", self.X_.shape)
print("The RSSI reading has dimension of:\n", self.Z_.shape)
print("The path loss parameters have been initialized to:\n", self.path_loss_params_)
def setParameters(self, epsilon, penalty_factor, delta, weight_scale, verbose=False):
"""
Set the parameters
Args:
epsilon: float, minimum value to prevent evaluation of distance between reference points and access points to be zero
penalty_factor: float, greater than zero, used to penalize zero rss value from path loss evaluation
verbose: Boolean, True to display execution details
"""
assert penalty_factor > 0
self.epsilon_ = epsilon
self.penalty_factor_ = penalty_factor
self.delta_ = delta
self.weight_scale_ = weight_scale
if verbose:
print("Epsilon has been set to:\n", epsilon)
print("Penalty has beeb set to:\n", penalty_factor)
print("Delta has beeb set to:\n", delta)
print("Weight scale has beeb set to:\n", weight_scale)
def optimize(self):
"""
Calculate the path loss model and gaussian process parameters
"""
self.initializePathLossParameters()
self.calculatePathlossModel()
self.calculateGP()
def initializePathLossParameters(self, verbose=False):
"""
        Initialize path loss parameters. Each potential AP position is initialized to where the corresponding readings are
        the largest.
Args:
verbose: Boolean, True to display execution details
"""
self.path_loss_params_[0, :] = 0.9
self.path_loss_params_[1, :] = 1.5
indexes = np.argmax(self.Z_, axis=0)
for i in range(self.n_ap_):
self.path_loss_params_[2:4, i] = self.X_[indexes[i], :]
if verbose:
print("The maximum indexes:\n", indexes)
print("Initialized path loss parameters:\n", self.path_loss_params_)
def calculatePathlossValue(self, x, parameters, epsilon, verbose=False):
"""
        Calculate the path loss of RSSI at position X from a given access point specified by parameters.
        pl = p0 - k*log10(|x - x_ap|), where p0 is the signal strength at 1m, k is the decaying factor, x is the location, and x_ap is the AP position.
Args:
x: n x 2 numpy array, the position data
parameters: 4 x 1 or 4 x m numpy array (in this case, x has to be 1 x 2), with each column in the form of [p0, k, x, y]
epsilon: float, to prevent distance to be 0
verbose: Boolean, to publish calculation details
Return:
float, or m x 1 numpy array, path loss prediction for singal access point or all access point at given location
"""
x = x.reshape(-1, 2)
p0 = parameters[0] # Signal strength at ...
k = parameters[1] # Decaying factor
x_ap = parameters[2:4].T # x,y coordinats of access points
d = np.sum((x - x_ap)**2, axis=1)**0.5 + epsilon
pl = p0 - k*np.log10(d)
pl = np.clip(pl, 0, 1).astype(float)
# Debug only
if verbose:
print("Parameters:\n", parameters)
print("Position vector:\n", x)
print("AP position:\n", x_ap)
print("Positional difference:\n", x - x_ap)
print("Squared difference:\n", (x - x_ap)**2)
print("Distance:\n", d)
print("Path loss value:\n", pl)
return pl
@staticmethod
def func(parameters, *args):
"""
Calculate the path loss for a certain access point, constant terms have been removed.
Args:
parameters: 4 x 1 numpy array, the form [p0, k, x, y]
args:
[0] -> X, n x 2 numpy array, position data
[1] -> Z, n x 1 numpy array, RSS reading from the same access point collected at different positions.
[2] -> epsilon, float, used to prevent distance evaluation from being zero
[3] -> penalty, penalty factor, positive number, used to penalize zero reading as those number indicate inability of network interface
[4] -> evaluate_function, to calculate path loss
[5] -> verbose, Boolean, True to display calculation details
Return:
float, cost
"""
# Unfold arguments
X = args[0]
Z = args[1]
epsilon = args[2]
penalty = args[3]
evaluate_function = args[4]
verbose = args[5]
# Compute the path loss
pl = evaluate_function(X, parameters, epsilon)
# Compute the weights
sign = Z <= 0.1
deviation = sign*penalty + 0.01
# Display calculation details if enabled
if verbose:
print("Path loss estimation:\n", pl)
print("Deviation:\n", deviation)
log_prob = stats.norm.logpdf(pl, loc=Z, scale=deviation)
return -np.sum(log_prob)
def calculatePathlossModel(self, verbose=False):
"""
Calculate path loss parameters for each access point.
Args:
verbose: Boolean, to publish execution details
"""
# Define the bound
bounds = [(0.8, 1), (0.5, 2.5), (-np.inf, np.inf), (-np.inf, np.inf)]
# Compute parameters for each access point
for i in range(self.n_ap_):
if verbose:
print("Compute path loss parameters for ", i+1, "th access point.")
# Define arguments
arg_list = (self.X_, self.Z_[:, i], self.epsilon_, self.penalty_factor_, self.calculatePathlossValue, False)
# Optimize
result = scipy.optimize.minimize(self.func, x0=self.path_loss_params_[:, i], bounds = bounds, args=arg_list)
# Refill optimized parameters
self.path_loss_params_[:, i] = result.x
# ...
if verbose:
print("Optimized path loss parameters:\n", self.path_loss_params_)
def calculateGP(self, verbose=False):
"""
Calculate gaussian process, refer to https://gpy.readthedocs.io/en/deploy/GPy.models.html for more information about GPy
Args:
verbose, Boolean, True to display calculation details
"""
Z_predict = np.zeros(self.Z_.shape)
# Calculate path loss prediction for each access point
for i in range(self.n_ap_):
Z_predict[:,i] = self.calculatePathlossValue(self.X_, self.path_loss_params_[:, i], self.epsilon_)
self.GP_ = GPy.models.GPRegression(self.X_, (self.Z_ - Z_predict))
self.GP_.optimize()
if verbose:
print("Difference between Z predict and Z:\n", np.sum(abs(self.Z_ - Z_predict)))
print("Optimized GP:", self.GP_)
def predict(self, x, observation, verbose=False):
"""
        Predict the likelihood of getting the given observation at positions x, using the path loss model as mean and the GP residual.
x = (x' - mean)/(variance^1/2)
cdf(z) = 1/2*[1 + erf(z/sqrt(2))] (standard normal distribution)
Args:
x: n x 2 numpy array, positions to predict
observation: 1 x n_ap or (n_ap, ) numpy array, normalized rss reading
verbose, Boolean, True to display calculation details
return:
            probability
"""
start = time.time()
# Calculate mean from path loss
mean = np.zeros((x.shape[0], self.n_ap_))
# Calculate path loss prediction for each access point
for i in range(self.n_ap_):
mean[:,i] = self.calculatePathlossValue(x, self.path_loss_params_[:, i], self.epsilon_)
# Calculate mean and variance
u, v = self.GP_.predict(x)
mean = np.clip(mean + u, 0, 1)
variance = np.ones(self.n_ap_)*v
# Compute probability
standardized = (observation - mean)/variance**0.5
probability = stats.norm.pdf(standardized)
probability = np.product(probability, axis=1)
probability = probability**(1/self.n_ap_)
# Boost probability
max_probability = np.max(probability)
method = np.vectorize(lambda x: x * self.weight_scale_ if (max_probability - x) <= max_probability/2 else x / self.weight_scale_)
probability = method(probability)
end = time.time()
if verbose:
print("Prediction took ", end-start, " seconds.")
return probability
def saveModel(self, path_loss_file, gp_file):
"""
Save training results to specified files
Args:
path_loss_file: string, csv file to save path loss parameters
            gp_file: string, file name with no extension, to which the trained GP model will be saved
"""
# Save path loss parameters
pass_loss_parameters = self.path_loss_params_.tolist()
for i in range(len(pass_loss_parameters)):
if i == 0:
option = 'w'
else:
option = 'a'
line = concatenateString([str(x) for x in pass_loss_parameters[i]])
writeLineToFile(path_loss_file, line, option)
# Save GP model
self.GP_.save_model(gp_file)
print("Model has been saves sucessfully.")
def loadModel(self, path_loss_file, gp_file):
"""
Load training results from specified files
Args:
            path_loss_file: string, csv file where path loss parameters are saved
            gp_file: string, file name with ".zip" extension, in which the trained GP model is saved
"""
# Load path_loss_file
parameters = readData(path_loss_file, True)
self.n_ap_ = len(parameters[0])
self.path_loss_params_ = np.zeros((len(parameters), self.n_ap_))
for i in range(len(parameters)):
self.path_loss_params_[i] = np.array(parameters[i])
# Load GP
self.GP_ = GPy.core.gp.GP.load_model(gp_file)
def __del__(self):
"""
The destructor of the class
"""
return None
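
# Usage sketch (illustrative only, not part of the original module): the array
# shapes follow the setData() contract above, but the data here is random
# placeholder data rather than real WiFi fingerprints.
def _example_sensor_usage():
    X = np.random.rand(50, 2) * 10        # 50 reference positions in a 10m x 10m area
    Z = np.random.rand(50, 4)             # normalized RSS readings from 4 access points
    sensor = Sensor()
    sensor.setData(X, Z)
    sensor.optimize()                     # fit path loss parameters and the GP residual model
    candidates = np.array([[1.0, 2.0], [3.0, 4.0]])
    observation = np.random.rand(4)       # one normalized RSS observation
    return sensor.predict(candidates, observation)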
class KNN:
"""
KNN predictor is used to predict probability of getting a certain observation at a location
"""
# General
X_ = None # Position data matrix: expected to be n x 2
Z_ = None # RSS reading matrix: expected to be n x m
kdtree_ = None
n_ap_ = None
k_ = 6
threshold_ = 1
delta_ = 0.02
variance_ = 0.0004
    def __init__(self):
"""
Initializer
"""
return None
def setData(self, X, Z):
"""
Set the data and parameters
Args:
X: n x 2 array, position data
Z: n x m array, normalized rss reading
"""
assert X.shape[1] == 2
assert X.shape[0] == Z.shape[0]
self.X_ = X
self.Z_ = Z
self.kdtree_ = KDTree(self.X_)
self.n_ap_ = Z.shape[1]
def loadData(self, coordinates_file, rss_file):
"""
Load data from specified data path and set internal states
Args:
coordinates_file: string, coordinates data file corresponding to specified rss data file
rss_file: string, processed rss data file (filtered, normalized and averaged)
"""
X = np.array(readData(coordinates_file, True, ' '))[:, 0:2]
Z = np.array(readData(rss_file, True))
self.setData(X, Z)
def getClosestDistance(self, x, number=1):
"""
Return the closest distances between references and provided point(s)
Args:
x: n x 2 numpy array, positions to query
"""
distance_vector, _ = self.kdtree_.query(x, k=number)
return np.array(distance_vector).reshape(-1)
def predict(self, x, observation, verbose=False):
"""
Predict the likelihood of getting observation at position x based on closest reference point
Args:
x: n x 2 numpy array, positions to predict
observation: 1 x n_ap or (n_ap, ) numpy array, normalized rss reading
verbose, Boolean, True to display calculation details
return:
probability
"""
start = time.time()
x = x.reshape(-1, 2)
# Calculate closet points
distance_vector, closest_indexes = self.kdtree_.query(x, k=self.k_)
# Reshape
distance_vector = np.array([distance_vector]).reshape(-1)
closest_indexes = np.array([closest_indexes]).reshape(-1)
# Find references
references = self.Z_[closest_indexes]
# Compute weighted probability
lb = (references-observation-self.delta_)/(2*self.variance_)**0.5
ub = (references-observation+self.delta_)/(2*self.variance_)**0.5
probability = 1/2*(scipy.special.erf(ub)-scipy.special.erf(lb))
probability = np.product(probability, axis=1)**(1/self.n_ap_)
weighted_probability = np.array([self.computeWeightedProbability(probability, distance_vector, i*self.k_, (i+1)*self.k_) for i in range(x.shape[0])])
end = time.time()
if verbose:
print("Prediction took ", end-start, " seconds.")
return weighted_probability
def computeWeightedProbability(self, probability, distance_vector, start, end):
"""
Compute weighted probability based on distance
"""
mask = distance_vector[start:end] <= self.threshold_
weights = mask + (1- mask)*(0.1/(distance_vector[start:end] + 1e-2)) # 1e-2 to prevent "divide by 0"
return np.sum(probability[start:end]*weights)
def __del__(self):
"""
The destructor of the class
"""
return None
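
# Usage sketch (illustrative only, not part of the original module): KNN works
# straight off the reference fingerprints, so only positions, readings and a
# query point are needed; the numbers here are placeholder data.
def _example_knn_usage():
    X = np.random.rand(50, 2) * 10        # reference positions
    Z = np.random.rand(50, 4)             # normalized RSS readings
    knn = KNN()
    knn.setData(X, Z)
    observation = np.random.rand(4)
    query = np.array([[1.0, 2.0]])
    return knn.predict(query, observation)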
class Hybrid:
"""
Hybrid predictor to estimate probability of getting a certain observation at a location
"""
knn_ = None
estimator_ = None
threshold_ = 1
def __init__(self):
"""
Initializer
"""
return None
def loadModel(self, coordinates_file, rss_file, path_loss_file, gp_file):
"""
Load data from specified data path and set internal states
Args:
coordinates_file: string, coordinates data file corresponding to specified rss data file
rss_file: string, processed rss data file (filtered, normalized and averaged)
            path_loss_file: string, csv file where path loss parameters are saved
            gp_file: string, file name with ".zip" extension, in which the trained GP model is saved
"""
self.knn_ = KNN()
self.knn_.loadData(coordinates_file, rss_file)
self.estimator_ = Sensor()
self.estimator_.loadModel(path_loss_file, gp_file)
def predict(self, x, observation, verbose=False):
"""
Predict the likelihood of getting observation at position x by using hybrid model
Args:
x: n x 2 numpy array, positions to predict
observation: 1 x n_ap or (n_ap, ) numpy array, normalized rss reading
verbose, Boolean, True to display calculation details
return:
probability
"""
x = x.reshape(-1, 2)
distance_vector = self.knn_.getClosestDistance(x)
probability = np.zeros(distance_vector.shape)
knn_indexes = np.argwhere(distance_vector < self.threshold_).reshape(-1)
estimator_indexes = np.argwhere(distance_vector >= self.threshold_).reshape(-1)
        if np.size(knn_indexes) == 0:
probability = self.estimator_.predict(x, observation)
        elif np.size(estimator_indexes) == 0:
probability = self.knn_.predict(x, observation)
else:
probability[knn_indexes] = self.knn_.predict(x[knn_indexes], observation)
probability[estimator_indexes] = self.estimator_.predict(x[estimator_indexes], observation).reshape(-1)
return probability | StarcoderdataPython |
3233390 | <reponame>aaren/pharminv
from io import BytesIO
import numpy as np
import numpy.testing as nt
import harminv
class ReferenceData(object):
# output generated from command line use of harminv
# cat tests/input.dat | harminv -t 0.01 0.001-1
cli_output = """frequency, decay constant, Q, amplitude, phase, error
-0.506426, 3.072252e-03, 517.856, 0.507868, -0.200813, 2.128453e-04
-0.301698, 6.482521e-04, 1462.11, 0.49748, -0.0520909, 7.269295e-05
-0.104134, 9.490460e-04, 344.711, 0.481868, -0.129022, 1.780971e-04
0.104119, 9.403484e-04, 347.851, 0.481781, 0.128571, 1.766600e-04
0.301435, 6.051884e-04, 1564.78, 0.497346, 0.0438478, 6.678115e-05
0.50092, 3.922559e-04, 4011.89, 0.499714, 0.0283455, 3.297469e-05
1.10893, -2.500241e-01, -13.9339, 0.000228962, 1.03671, 6.522260e-04
"""
refp = BytesIO(cli_output.encode())
data = np.genfromtxt(refp, delimiter=',', names=True)
def __getitem__(self, key):
return self.data[key]
def create_signal():
"""Create the test signal.
N.B the random component varies the results slightly. If you
regenerate the test data then you will need to update
ReferenceData.cli_output.
"""
tau = 2 * np.pi
dt = 0.01
time = np.arange(1000) * dt
noise = 0.1 * np.random.random(1000)
signal = np.cos(tau * 0.1 * time) \
+ np.cos(tau * 0.3 * time) \
+ np.cos(tau * 0.5 * time)
return noise + signal
def write_signal(signal):
with open('tests/input.dat', 'w') as f:
signal.tofile(f, sep=' ')
def read_signal():
return np.fromfile('tests/input.dat', sep=' ')
def test_harminv():
refdata = ReferenceData()
signal = read_signal()
harm = harminv.Harminv(signal=signal, fmin=0.001, fmax=1, nf=100, dt=0.01)
nt.assert_allclose(harm.freq, refdata['frequency'], rtol=1e-5)
nt.assert_allclose(harm.decay, refdata['decay_constant'], rtol=1e-4)
nt.assert_allclose(harm.Q, refdata['Q'], rtol=1e-4)
nt.assert_allclose(harm.amplitude, refdata['amplitude'], rtol=1e-4)
nt.assert_allclose(harm.phase, refdata['phase'], rtol=1e-4)
nt.assert_allclose(harm.error, refdata['error'], rtol=1e-4)
for i in range(harm.freq.size):
print("%g, %e, %g, %g, %g, %e" % (harm.freq[i],
harm.decay[i],
harm.Q[i],
harm.amplitude[i],
harm.phase[i],
harm.error[i]))
| StarcoderdataPython |
1753037 | <filename>nlp100/chapter-01/04.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
s =""
s = s.replace(".","")
words = s.split(" ")
word_count = []
count = 0
for word in words:
count+=1
if count in [1,5,6,7,9,16,19]:
word_count.append(word[:1])
else:
word_count.append(word[:2])
print(word_count)
# This could probably be written more elegantly with a list comprehension
| StarcoderdataPython |
51091 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=80)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
permission = models.ForeignKey('AuthPermission', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_group_permissions'
unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.IntegerField()
username = models.CharField(unique=True, max_length=150)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=150)
email = models.CharField(max_length=254)
is_staff = models.IntegerField()
is_active = models.IntegerField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
class AuthUserGroups(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_groups'
unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
permission = models.ForeignKey(AuthPermission, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
unique_together = (('user', 'permission'),)
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.PositiveSmallIntegerField()
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class DotaAdmin(models.Model):
admin = models.ForeignKey('DotaUser', models.DO_NOTHING, primary_key=True)
admin_registration_number = models.CharField(unique=True, max_length=20)
class Meta:
managed = False
db_table = 'dota_admin'
class DotaGamer(models.Model):
gamer = models.ForeignKey('DotaUser', models.DO_NOTHING, primary_key=True)
gamer_ign = models.CharField(max_length=20)
class Meta:
managed = False
db_table = 'dota_gamer'
class DotaGamerMatch(models.Model):
matchid = models.ForeignKey('DotaMatch', models.DO_NOTHING, db_column='matchid', primary_key=True)
match_gpm = models.IntegerField(db_column='match_GPM') # Field name made lowercase.
match_kills = models.IntegerField(db_column='match_Kills') # Field name made lowercase.
match_xpm = models.IntegerField(db_column='match_XPM') # Field name made lowercase.
match_death = models.IntegerField()
match_assist = models.IntegerField()
gamerid = models.ForeignKey(DotaGamer, models.DO_NOTHING, db_column='gamerid')
dota_gamer_matchcol = models.CharField(max_length=45, blank=True, null=True)
match_status = models.CharField(max_length=45)
class Meta:
managed = False
db_table = 'dota_gamer_match'
class DotaMatch(models.Model):
match_id = models.IntegerField(db_column='match_ID', primary_key=True) # Field name made lowercase.
match_type = models.CharField(db_column='match_Type', max_length=15) # Field name made lowercase.
match_duration = models.CharField(db_column='match_Duration', max_length=50) # Field name made lowercase.
class Meta:
managed = False
db_table = 'dota_match'
class DotaMmr(models.Model):
mmr = models.ForeignKey(DotaGamer, models.DO_NOTHING, db_column='mmr_Id', primary_key=True) # Field name made lowercase.
mmr_score = models.BigIntegerField()
mmr_medal = models.CharField(max_length=30)
class Meta:
managed = False
db_table = 'dota_mmr'
class DotaPremiumuser(models.Model):
premiumuser_registration_number = models.BigIntegerField(db_column='premiumuser_Registration_Number', primary_key=True) # Field name made lowercase.
premiumuser_registrationexpirydate = models.CharField(db_column='premiumuser_RegistrationExpiryDate', max_length=30) # Field name made lowercase.
premiumuser_gamer = models.ForeignKey(DotaGamer, models.DO_NOTHING, db_column='premiumuser_Gamer_ID') # Field name made lowercase.
class Meta:
managed = False
db_table = 'dota_premiumuser'
class DotaTournament(models.Model):
tournament_id = models.IntegerField(db_column='Tournament_ID', primary_key=True) # Field name made lowercase.
tournament_name = models.CharField(db_column='Tournament_name', max_length=100) # Field name made lowercase.
tournament_starting_timedate = models.DateTimeField(db_column='Tournament_starting_timedate') # Field name made lowercase.
tournament_end_timedate = models.DateTimeField(db_column='Tournament_end_timedate') # Field name made lowercase.
tournament_prize = models.CharField(db_column='Tournament_prize', max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'dota_tournament'
class DotaTournamentMatch(models.Model):
matchid = models.ForeignKey(DotaMatch, models.DO_NOTHING, db_column='Matchid', primary_key=True) # Field name made lowercase.
tournamentid = models.ForeignKey(DotaTournament, models.DO_NOTHING, db_column='Tournamentid') # Field name made lowercase.
class Meta:
managed = False
db_table = 'dota_tournament_match'
class DotaUser(models.Model):
user_id = models.BigIntegerField(primary_key=True)
user_name = models.CharField(max_length=45)
user_email = models.CharField(max_length=45)
user_username = models.CharField(unique=True, max_length=30)
user_password = models.CharField(max_length=30)
class Meta:
managed = False
db_table = 'dota_user'
| StarcoderdataPython |
1794805 | <gh_stars>1-10
import json
buildings = ["HEADQUARTER", "BARRACKS", "STABLE", "WORKSHOP", "ACADEMY", "SMITHY", "RALLY_POINT", "STATUE", "MARKET",
"TIMBER_CAMP", "CLAY_PIT", "IRON_MINE", "FARM", "WAREHOUSE", "HIDING_PLACE", "WALL"]
requirements = [
{},
{"HEADQUARTER": 3},
{"HEADQUARTER": 10, "BARRACKS": 5, "SMITHY": 5},
{"HEADQUARTER": 10, "SMITHY": 10},
{"HEADQUARTER": 20, "SMITHY": 20, "MARKET": 10},
{"HEADQUARTER": 5, "BARRACKS": 1},
{},
{},
{"HEADQUARTER": 3, "WAREHOUSE": 2},
{},
{},
{},
{},
{},
{},
{"BARRACKS": 1}
]
levels = [
[
[[90, 80, 70, 5, 5], 0.95],
[[113, 102, 88, 1, 6], 0.91],
[[143, 130, 111, 1, 7], 0.86],
[[180, 166, 140, 1, 8], 0.82],
[[227, 211, 176, 1, 9], 0.78],
[[286, 270, 222, 2, 11], 0.75],
[[360, 344, 280, 2, 13], 0.71],
[[454, 438, 353, 2, 15], 0.68],
[[572, 559, 445, 3, 18], 0.64],
[[720, 712, 560, 3, 21], 0.61],
[[908, 908, 706, 3, 24], 0.58],
[[1144, 1158, 890, 4, 28], 0.56],
[[1441, 1476, 1121, 5, 33], 0.53],
[[1816, 1882, 1412, 5, 38], 0.51],
[[2288, 2400, 1779, 7, 45], 0.48],
[[2883, 3060, 2242, 8, 53], 0.46],
[[3632, 3902, 2825, 9, 62], 0.44],
[[4577, 4975, 3560, 10, 72], 0.42],
[[5767, 6343, 4485, 12, 84], 0.40],
[[7266, 8087, 5651, 15, 99], 0.38],
        [[9155, 10311, 7120, 17, 116], 0.36],
[[11535, 13146, 8972, 19, 135], 0.34],
[[14534, 16762, 11304, 23, 158], 0.33],
[[18313, 21371, 14244, 27, 185], 0.31],
[[23075, 27248, 17947, 31, 216], 0.30],
[[29074, 34741, 22613, 37, 253], 0.28],
[[36633, 44295, 28493, 43, 296], 0.27],
[[46158, 56476, 35901, 51, 347], 0.26],
[[58159, 72007, 45235, 59, 406], 0.24],
[[73280, 91809, 56996, 69, 475], 0.23]
],
[
[[200, 170, 90, 7, 7], 0.63],
[[252, 218, 113, 1, 8], 0.59],
[[318, 279, 143, 2, 10], 0.56],
[[400, 357, 180, 1, 11], 0.53],
[[504, 456, 227, 2, 13], 0.50],
[[635, 584, 286, 2, 15], 0.47],
[[800, 748, 360, 3, 18], 0.44],
[[1008, 957, 454, 3, 21], 0.42],
[[1271, 1225, 572, 4, 25], 0.39],
[[1601, 1568, 720, 4, 29], 0.37],
[[2017, 2007, 908, 5, 34], 0.35],
[[2542, 2569, 1144, 5, 39], 0.33],
[[3202, 3288, 1441, 7, 46], 0.31],
[[4035, 4209, 1816, 8, 54], 0.29],
[[5084, 5388, 2288, 9, 63], 0.28],
[[6406, 6896, 2883, 11, 74], 0.26],
[[8072, 8827, 3632, 12, 86], 0.25],
[[10170, 11298, 4577, 15, 101], 0.23],
[[12814, 14462, 5767, 17, 118], 0.22],
[[16146, 18511, 7266, 20, 138], 0.21],
[[20344, 23695, 9155, 24, 162], 0.20],
[[25634, 30329, 11535, 27, 189], 0.19],
[[32298, 38821, 14534, 32, 221], 0.17],
[[40696, 49691, 18313, 38, 259], 0.16],
[[51277, 63605, 23075, 44, 303], 0.15]
],
[
[[270, 240, 260, 8, 8], 0.63],
[[340, 307, 328, 1, 9], 0.59],
[[429, 393, 413, 2, 11], 0.56],
[[540, 503, 520, 2, 13], 0.53],
[[681, 644, 655, 2, 15], 0.5],
[[857, 825, 826, 3, 18], 0.47],
[[1080, 1056, 1040, 3, 21], 0.44],
[[1361, 1351, 1311, 3, 24], 0.42],
[[1715, 1729, 1652, 4, 28], 0.39],
[[2161, 2214, 2081, 5, 33], 0.37],
[[2723, 2833, 2622, 5, 38], 0.35],
[[3431, 3627, 3304, 7, 45], 0.33],
[[4323, 4642, 4163, 8, 53], 0.31],
[[5447, 5942, 5246, 9, 62], 0.29],
[[6864, 7606, 6609, 10, 72], 0.28],
[[8648, 9736, 8328, 12, 84], 0.26],
[[10897, 12462, 10493, 15, 99], 0.25],
[[13730, 15951, 13221, 16, 115], 0.23],
[[17300, 20417, 16659, 20, 135], 0.22],
[[21797, 26134, 20990, 23, 158], 0.21]
],
[
[[300, 240, 260, 8, 8], 0.63],
[[378, 307, 328, 1, 9], 0.59],
[[476, 393, 413, 2, 11], 0.56],
[[600, 503, 520, 2, 13], 0.53],
[[756, 644, 655, 2, 15], 0.5],
[[953, 825, 826, 3, 18], 0.47],
[[1200, 1056, 1040, 3, 21], 0.44],
[[1513, 1351, 1311, 3, 24], 0.42],
[[1906, 1729, 1652, 4, 28], 0.39],
[[2401, 2214, 2081, 5, 33], 0.37],
[[3026, 2833, 2622, 5, 38], 0.35],
[[3812, 3627, 3304, 7, 45], 0.33],
[[4804, 4642, 4163, 8, 53], 0.31],
[[6053, 5942, 5246, 9, 62], 0.29],
[[7626, 7606, 6609, 10, 72], 0.28]
],
[
[[15000, 25000, 10000, 80, 80], 0.63],
[[30000, 50000, 20000, 14, 94], 0.59],
[[60000, 100000, 40000, 16, 110], 0.56]
],
[
[[220, 180, 240, 20, 20], 0.91],
[[277, 230, 302, 3, 23], 0.83],
[[349, 293, 381, 4, 27], 0.75],
[[440, 373, 480, 5, 32], 0.68],
[[555, 476, 605, 5, 37], 0.62],
[[699, 606, 762, 7, 44], 0.56],
[[880, 773, 960, 7, 51], 0.51],
[[1109, 986, 1210, 9, 60], 0.47],
[[1398, 1257, 1525, 10, 70], 0.42],
[[1761, 1603, 1921, 12, 82], 0.39],
[[2219, 2043, 2421, 14, 96], 0.35],
[[2796, 2605, 3050, 16, 112], 0.32],
[[3523, 3322, 3843, 20, 132], 0.29],
[[4439, 4236, 4842, 22, 154], 0.26],
[[5593, 5400, 6101, 26, 180], 0.24],
[[7047, 6885, 7687, 31, 211], 0.22],
[[8879, 8779, 9686, 36, 247], 0.2],
[[11187, 11193, 12204, 42, 289], 0.18],
[[14096, 14271, 15377, 49, 338], 0.16],
[[17761, 18196, 19375, 57, 395], 0.15]
],
[
[[10, 40, 30, 0, 0], 1.0]
],
[
[[220, 220, 220, 10, 10], 1.0]
],
[
[[100, 100, 100, 20, 20], 1],
[[126, 128, 126, 3, 23], 2],
[[159, 163, 159, 4, 27], 3],
[[200, 207, 200, 5, 32], 4],
[[252, 264, 252, 5, 37], 5],
[[318, 337, 318, 7, 44], 6],
[[400, 430, 400, 7, 51], 7],
[[504, 548, 504, 9, 60], 8],
[[635, 698, 635, 10, 70], 9],
[[800, 890, 800, 12, 82], 10],
[[1009, 1135, 1009, 14, 96], 11],
[[1271, 1447, 1271, 16, 112], 14],
[[1601, 1846, 1601, 20, 132], 19],
[[2018, 2353, 2018, 22, 154], 26],
[[2542, 3000, 2542, 26, 180], 35],
[[3203, 3825, 3203, 31, 211], 46],
[[4036, 4877, 4036, 36, 247], 59],
[[5085, 6218, 5085, 42, 289], 74],
[[6407, 7928, 6407, 49, 338], 91],
[[8073, 10109, 8073, 57, 395], 110],
[[10172, 12889, 10172, 67, 462], 131],
[[12817, 16433, 12817, 79, 541], 154],
[[16149, 20952, 16149, 92, 633], 179],
[[20348, 26714, 20348, 107, 740], 206],
[[25639, 34060, 25639, 126, 866], 235]
],
[
[[50, 60, 40, 5, 5], 30],
[[63, 77, 50, 1, 6], 35],
[[78, 98, 62, 1, 7], 41],
[[98, 124, 77, 1, 8], 47],
[[122, 159, 96, 1, 9], 55],
[[153, 202, 120, 1, 10], 64],
[[191, 258, 149, 2, 12], 74],
[[238, 329, 185, 2, 14], 86],
[[298, 419, 231, 2, 16], 100],
[[373, 534, 287, 2, 18], 117],
[[466, 681, 358, 3, 21], 136],
[[582, 868, 446, 3, 24], 158],
[[728, 1107, 555, 4, 28], 184],
[[909, 1412, 691, 5, 33], 214],
[[1137, 1800, 860, 5, 38], 249],
[[1421, 2295, 1071, 5, 43], 289],
[[1776, 2926, 1333, 7, 50], 337],
[[2220, 3731, 1659, 8, 58], 391],
[[2776, 4757, 2066, 9, 67], 455],
[[3469, 6065, 2572, 10, 77], 530],
[[4337, 7733, 3202, 12, 89], 616],
[[5421, 9860, 3987, 14, 103], 717],
[[6776, 12571, 4963, 16, 119], 833],
[[8470, 16028, 6180, 19, 138], 969],
[[10588, 20436, 7694, 21, 159], 1127],
[[13235, 26056, 9578, 24, 183], 1311],
[[16544, 33221, 11925, 29, 212], 1525],
[[20680, 42357, 14847, 33, 245], 1774],
[[25849, 54005, 18484, 38, 283], 2063],
[[32312, 68857, 23013, 43, 326], 2400]
],
[
[[65, 50, 40, 10, 10], 30],
[[83, 63, 50, 1, 11], 35],
[[105, 80, 62, 2, 13], 41],
[[133, 101, 76, 2, 15], 47],
[[169, 128, 95, 2, 17], 55],
[[215, 162, 117, 2, 19], 64],
[[273, 205, 145, 3, 22], 74],
[[346, 259, 180, 3, 25], 86],
[[440, 328, 224, 4, 29], 100],
[[559, 415, 277, 4, 33], 117],
[[709, 525, 344, 4, 37], 136],
[[901, 664, 426, 5, 42], 158],
[[1144, 840, 529, 6, 48], 184],
[[1453, 1062, 655, 7, 55], 214],
[[1846, 1343, 813, 8, 63], 249],
[[2344, 1700, 1008, 8, 71], 289],
[[2977, 2150, 1250, 10, 81], 337],
[[3781, 2720, 1550, 12, 93], 391],
[[4802, 3440, 1922, 13, 106], 455],
[[6098, 4352, 2383, 15, 121], 530],
[[7744, 5505, 2955, 16, 137], 616],
[[9835, 6964, 3664, 20, 157], 717],
[[12491, 8810, 4543, 22, 179], 833],
[[15863, 11144, 5633, 25, 204], 969],
[[20147, 14098, 6985, 28, 232], 1127],
[[25586, 17833, 8662, 33, 265], 1311],
[[32495, 22559, 10740, 37, 302], 1525],
[[41268, 28537, 13318, 42, 344], 1774],
[[52410, 36100, 16515, 48, 392], 2063],
[[66561, 45666, 20478, 55, 447], 2400]
],
[
[[75, 65, 70, 10, 10], 30],
[[94, 83, 87, 2, 12], 35],
[[118, 106, 108, 2, 14], 41],
[[147, 135, 133, 2, 16], 47],
[[184, 172, 165, 3, 19], 55],
[[231, 219, 205, 3, 22], 64],
[[289, 279, 254, 4, 26], 74],
[[362, 356, 316, 4, 30], 86],
[[453, 454, 391, 5, 35], 100],
[[567, 579, 485, 6, 41], 117],
[[710, 738, 602, 7, 48], 136],
[[889, 941, 746, 8, 56], 158],
[[1113, 1200, 925, 10, 66], 184],
[[1393, 1529, 1147, 11, 77], 214],
[[1744, 1950, 1422, 13, 90], 249],
[[2183, 2486, 1764, 15, 105], 289],
[[2734, 3170, 2187, 18, 123], 337],
[[3422, 4042, 2712, 21, 144], 391],
[[4285, 5153, 3363, 25, 169], 455],
[[5365, 6571, 4170, 28, 197], 530],
[[6717, 8378, 5170, 34, 231], 616],
[[8409, 10681, 6411, 39, 270], 717],
[[10528, 13619, 7950, 46, 316], 833],
[[13181, 17364, 9858, 54, 370], 969],
[[16503, 22139, 12224, 63, 433], 1127],
[[20662, 28227, 15158, 74, 507], 1311],
[[25869, 35990, 18796, 86, 593], 1525],
[[32388, 45887, 23307, 100, 693], 1774],
[[40549, 58506, 28900, 118, 811], 2063],
[[50768, 74595, 35837, 138, 949], 2400]
],
[
[[45, 40, 30, 0, 0], 240],
[[59, 53, 39, 0, 0], 281],
[[76, 70, 50, 0, 0], 329],
[[99, 92, 64, 0, 0], 386],
[[129, 121, 83, 0, 0], 452],
[[167, 160, 107, 0, 0], 530],
[[217, 212, 138, 0, 0], 622],
[[282, 279, 178, 0, 0], 729],
[[367, 369, 230, 0, 0], 854],
[[477, 487, 297, 0, 0], 1002],
[[620, 642, 383, 0, 0], 1174],
[[806, 848, 494, 0, 0], 1376],
[[1048, 1119, 637, 0, 0], 1613],
[[1363, 1477, 822, 0, 0], 1891],
[[1772, 1950, 1060, 0, 0], 2216],
[[2303, 2574, 1368, 0, 0], 2598],
[[2994, 3398, 1764, 0, 0], 3045],
[[3893, 4486, 2276, 0, 0], 3569],
[[5060, 5921, 2936, 0, 0], 4183],
[[6579, 7816, 3787, 0, 0], 4904],
[[8552, 10317, 4886, 0, 0], 5748],
[[11118, 13618, 6302, 0, 0], 6737],
[[14453, 17976, 8130, 0, 0], 7896],
[[18789, 23728, 10488, 0, 0], 9255],
[[24426, 31321, 13529, 0, 0], 10848],
[[31754, 41344, 17453, 0, 0], 12715],
[[41280, 54574, 22514, 0, 0], 14904],
[[53664, 72037, 29043, 0, 0], 17469],
[[69763, 95089, 37466, 0, 0], 20476],
[[90692, 125517, 48331, 0, 0], 24000]
],
[
[[60, 50, 40, 0, 0], 1000],
[[76, 64, 50, 0, 0], 1229],
[[96, 81, 62, 0, 0], 1512],
[[121, 102, 77, 0, 0], 1859],
[[154, 130, 96, 0, 0], 2285],
[[194, 165, 120, 0, 0], 2810],
[[246, 210, 149, 0, 0], 3454],
[[311, 266, 185, 0, 0], 4247],
[[393, 338, 231, 0, 0], 5222],
[[498, 430, 287, 0, 0], 6420],
[[630, 546, 358, 0, 0], 7893],
[[796, 693, 446, 0, 0], 9705],
[[1007, 880, 555, 0, 0], 11932],
[[1274, 1118, 691, 0, 0], 14670],
[[1612, 1420, 860, 0, 0], 18037],
[[2039, 1803, 1071, 0, 0], 22177],
[[2580, 2290, 1333, 0, 0], 27266],
[[3264, 2908, 1659, 0, 0], 33523],
[[4128, 3693, 2066, 0, 0], 41217],
[[5222, 4691, 2572, 0, 0], 50675],
[[6606, 5957, 3202, 0, 0], 62305],
[[8357, 7566, 3987, 0, 0], 76604],
[[10572, 9608, 4963, 0, 0], 94184],
[[13373, 12203, 6180, 0, 0], 115798],
[[16917, 15497, 7694, 0, 0], 142373],
[[21400, 19682, 9578, 0, 0], 175047],
[[27071, 24996, 11925, 0, 0], 215219],
[[34245, 31745, 14847, 0, 0], 264611],
[[43320, 40316, 18484, 0, 0], 325337],
[[54799, 51201, 23013, 0, 0], 400000]
],
[
[[50, 60, 50, 2, 2], 150],
[[63, 75, 63, 0, 2], 200],
[[78, 94, 78, 1, 3], 267],
[[98, 117, 98, 0, 3], 356],
[[122, 146, 122, 1, 4], 474],
[[153, 183, 153, 0, 4], 632],
[[191, 229, 191, 1, 5], 843],
[[238, 286, 238, 1, 6], 1125],
[[298, 358, 298, 1, 7], 1500],
[[373, 447, 373, 1, 8], 2000]
],
[
[[50, 100, 20, 5, 5], 0.04],
[[63, 128, 25, 1, 6], 0.08],
[[79, 163, 32, 1, 7], 0.12],
[[100, 207, 40, 1, 8], 0.16],
[[126, 264, 50, 1, 9], 0.2],
[[159, 337, 64, 2, 11], 0.24],
[[200, 430, 80, 2, 13], 0.29],
[[252, 548, 101, 2, 15], 0.34],
[[318, 698, 127, 3, 18], 0.39],
[[400, 890, 160, 3, 21], 0.44],
[[504, 1135, 202, 3, 24], 0.49],
[[635, 1447, 254, 4, 28], 0.55],
[[801, 1846, 320, 5, 33], 0.6],
[[1009, 2353, 404, 5, 38], 0.66],
[[1271, 3000, 508, 7, 45], 0.72],
[[1602, 3825, 641, 8, 53], 0.79],
[[2018, 4877, 807, 9, 62], 0.85],
[[2543, 6218, 1017, 10, 72], 0.92],
[[3204, 7928, 1281, 12, 84], 0.99],
[[4037, 10109, 1615, 15, 99], 1.07]
],
]
data = {
"requirements": {},
"levels": []
}
def create_files():
for i, name in enumerate(buildings):
f = open(f"{name}.json", "w")
data["requirements"].clear()
data["requirements"].update(requirements[i])
data["levels"] = levels[i]
f.write(json.dumps(data))
f.close()
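# Small illustrative helper (not part of the original script): read one of the
# files written by create_files() back in; the file name follows the same
# f"{name}.json" pattern used above.
def load_building(name):
    with open(f"{name}.json") as f:
        return json.load(f)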
def json_parser_for_buildings():
s = """[
[1,"{{Res|50|100|20}}","{{Workers|5|5}}","4%" ],
[2,"{{Res|63|128|25}}","{{Workers|1|6}}","8%" ],
[3,"{{Res|79|163|32}}","{{Workers|1|7}}","12%" ],
[4,"{{Res|100|207|40}}","{{Workers|1|8}}","16%" ],
[5,"{{Res|126|264|50}}","{{Workers|1|9}}","20%" ],
[6,"{{Res|159|337|64}}","{{Workers|2|11}}","24%" ],
[7,"{{Res|200|430|80}}","{{Workers|2|13}}","29%" ],
[8,"{{Res|252|548|101}}","{{Workers|2|15}}","34%" ],
[9,"{{Res|318|698|127}}","{{Workers|3|18}}","39%" ],
[10,"{{Res|400|890|160}}","{{Workers|3|21}}","44%" ],
[11,"{{Res|504|1135|202}}","{{Workers|3|24}}","49%" ],
[12,"{{Res|635|1447|254}}","{{Workers|4|28}}","55%" ],
[13,"{{Res|801|1846|320}}","{{Workers|5|33}}","60%" ],
[14,"{{Res|1009|2353|404}}","{{Workers|5|38}}","66%" ],
[15,"{{Res|1271|3000|508}}","{{Workers|7|45}}","72%" ],
[16,"{{Res|1602|3825|641}}","{{Workers|8|53}}","79%" ],
[17,"{{Res|2018|4877|807}}","{{Workers|9|62}}","85%" ],
[18,"{{Res|2543|6218|1017}}","{{Workers|10|72}}","92%" ],
[19,"{{Res|3204|7928|1281}}","{{Workers|12|84}}","99%" ],
[20,"{{Res|4037|10109|1615}}","{{Workers|15|99}}","107%" ]
]"""
json_object = json.loads(s)
result = ""
for index, element in enumerate(json_object):
wood, clay, iron = str(element[1][6:-2]).split("|")
pop = str(element[2][10:-2]).split("|")[0]
factor = float(element[3][0:-1]) / 100
print(f"[[{wood},{clay},{iron},{pop}], {factor}],")
def json_parser_for_points():
s = """[
["1,10,16,20,24,10,10,42,512,19,0,24,10,6,6,6,5,6,5,8","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["2,2,3,4,5,2,,8,,4,,,2,1,1,1,1,1,1,2","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["3,2,4,5,6,2,,10,,4,,,2,2,2,2,1,2,1,2","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["4,3,5,6,6,,,13,,6,,,3,1,1,1,2,1,2,2","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["5,4,5,6,9,,,14,,6,,,4,2,2,2,1,2,1,3","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["6,4,7,9,10,,,18,,8,,,4,3,3,3,2,3,2,3","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["7,5,8,10,12,,,20,,10,,,5,3,3,3,3,3,3,4","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["8,6,9,12,14,,,25,,11,,,6,3,3,3,3,3,3,5","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["9,7,12,14,17,,,31,,14,,,7,5,5,5,3,5,3,5","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["10,9,14,17,21,,,36,,16,,,9,5,5,5,5,5,5,7","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["11,10,16,21,25,,,43,,20,,,10,6,6,6,5,6,,9","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["12,12,20,25,29,,,52,,23,,,12,8,8,8,6,8,,9","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["13,15,24,29,36,,,62,,28,,,15,8,8,8,8,8,,12","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["14,18,28,36,43,,,75,,34,,,18,11,11,11,8,11,,15","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["15,21,34,43,51,,,90,,41,,,21,13,13,13,11,13,,17","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["16,26,42,51,,,,108,,49,,,26,15,15,15,13,15,,20","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["17,31,49,62,,,,130,,58,,,31,19,19,19,15,19,,25","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["18,37,59,74,,,,155,,71,,,37,22,22,22,19,22,,29","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["19,44,71,88,,,,186,,84,,,44,27,27,27,22,27,,36","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["20,53,85,107,,,,224,,101,,,53,32,32,32,27,32,,43","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["21,64,102,,,,,,,,,,64,38,38,38,32,38,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["22,77,123,,,,,,,,,,77,46,46,46,38,46,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["23,92,147,,,,,,,,,,92,55,55,55,46,55,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["24,110,177,,,,,,,,,,110,66,66,66,55,66,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["25,133,212,,,,,,,,,,133,80,80,80,66,80,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["26,159,,,,,,,,,,,,95,95,95,80,95,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["27,191,,,,,,,,,,,,115,115,115,95,115,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["28,229,,,,,,,,,,,,137,137,137,115,137,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["29,274,,,,,,,,,,,,165,165,165,137,165,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ],
["30,330,,,,,,,,,,,,198,198,198,165,198,,","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" ]
]"""
json_object = json.loads(s)
matrix = [[0 for x in range(30)] for y in range(16)]
y = 0
x = 0
for index_y, element_y in enumerate(json_object):
l = str(element_y[0][1:]).split("'")[0].split(",")
for index_x, element_x in enumerate(l):
if index_x == 0 or 5 <= index_x <= 7:
continue
matrix[y][x] = 0 if element_x == '' else int(element_x)
y += 1
x += 1
y = 0
f = open(f"POINTS.json", "w")
f.write(json.dumps(matrix))
f.close()
# create_files()
# json_parser_for_buildings()
# json_parser_for_points()
| StarcoderdataPython |
154598 | import discord
from discord.ext import commands
from discord.utils import get
import asyncio
from gtoken import *
comando = ["turma entrar", "turma sair", "turma add", "info", "alterar prefix"]
client = discord.Client()
prefixo = Prefixo()
msg_id = None
msg_user = None
@client.event
async def on_ready():
print('Rodando {0.user}'.format(client))
@client.event
async def on_message(message, prefixo = prefixo):
if message.author == client.user:
return
if message.content.startswith((prefixo.retornaPrefix() + ' turma add')):
await message.channel.send('Esse comando permite que você adicione uma turma/cargo!')
elif message.content.startswith((prefixo.retornaPrefix() + ' turma sair')):
await message.channel.send('Esse comando permite que você saia de uma turma/cargo que você está incluso!')
elif message.content.startswith((prefixo.retornaPrefix() + ' turma entrar')):
embed1 = discord.Embed(
title="Escolha a turma que deseja entrar!",
color=0x690FC3,
description="- IHC = 🐤\n"
"- Grafos = 📘 \n"
"- LIP-Rodrigo = 📙",)
botmsg = await message.channel.send(embed=embed1)
await botmsg.add_reaction("🐤")
await botmsg.add_reaction("📘")
await botmsg.add_reaction("📙")
global msg_id
msg_id = botmsg.id
elif message.content.startswith((prefixo.retornaPrefix() + ' info')):
await message.channel.send('O BODOC tem como objetivo reunir alunos das mesmas turmas ' +
'facilitando assim a comunicação entre eles.')
mensagem = "\nEsses são todos os comandos disponiveis:"
for item in comando:
mensagem += "\n" + prefixo.retornaPrefix() + " " + item
await message.channel.send(mensagem)
elif message.content.startswith((prefixo.retornaPrefix() + ' alterar prefix')):
prefixo.alteraPrefix(message.content.split()[3])
await message.channel.send('Prefixo alterado para ' + prefixo.retornaPrefix())
# await message.channel.send('Esse comando permite que você saia de uma turma/cargo que você está incluso!')
global msg_user
msg_user = message.author
@client.event
async def on_reaction_add(reaction, user):
msg = reaction.message
if reaction.emoji == "🐤" and msg.id == msg_id and user.id != client.user.id:
await user.add_roles(get(reaction.message.guild.roles, name="IHC"))
elif reaction.emoji == "📘" and msg.id == msg_id and user.id != client.user.id:
await user.add_roles(get(reaction.message.guild.roles, name="Grafos"))
elif reaction.emoji == "📙" and msg.id == msg.id and user.id != client.user.id:
await user.add_roles(get(reaction.message.guild.roles, name="LIP-Rodrigo"))
@client.event
async def on_reaction_remove(reaction, user):
msg = reaction.message
if reaction.emoji == "🐤" and msg.id == msg_id and user.id != client.user.id:
await user.remove_roles(get(reaction.message.guild.roles, name="IHC"))
elif reaction.emoji == "📘" and msg.id == msg_id and user.id != client.user.id:
await user.remove_roles(get(reaction.message.guild.roles, name="Grafos"))
elif reaction.emoji == "📙" and msg.id == msg.id and user.id != client.user.id:
await user.remove_roles(get(reaction.message.guild.roles, name="LIP-Rodrigo"))
client.run(retornatoken()) | StarcoderdataPython |
1650511 | <gh_stars>0
def btc_total(cheese=0, pie=0, tea=0, sandwich=0, chips=0, end="\n\n"):
return cheese * 4 + pie * 3 + sandwich * 8 + tea + chips
| StarcoderdataPython |
156473 | <reponame>eriknyquist/chatbot_utils
import re
import random
from unittest import TestCase
from chatbot_utils.redict import ReDict
class TestReDict(TestCase):
def fill_redict(self, dictobj=None, numitems=1000):
if not dictobj:
dictobj = ReDict()
testitems = {"((foo+|bar*) )?%d" % i : i for i in range(numitems)}
for key, val in testitems.items():
dictobj[key] = val
return testitems, dictobj
def test_all_items_accessible(self):
num_iterations = 50
d = ReDict()
for i in range(num_iterations):
expr = "(foo*|bar+) %d" % i
d[expr] = i
for i in range(num_iterations):
test1 = "fo %d" % i
test2 = "foo %d" % i
test3 = "foooo %d" % i
test4 = "bar %d" % i
test5 = "barr %d" % i
test6 = "barrrrr %d" % i
for testval in [test1, test2, test3, test4, test5, test6]:
self.assertEqual(i, d[testval])
def test_groups_per_regex(self):
d = ReDict()
num_iterations = d.groups_per_regex * 3
for i in range(num_iterations):
expr = "((f)(o)(o)*|(b)(a)(r)+) %d" % i
d[expr] = i
for i in range(num_iterations):
self.assertEqual(i, d["foo %d" % i])
def test_value_can_be_arbitrary_object(self):
d = ReDict()
strval = "test string"
boolval = False
classval = self.__class__
funcval = self.setUpClass
d["str"] = strval
d["bool"] = boolval
d["class"] = classval
d["func"] = funcval
self.assertIs(d["str"], strval)
self.assertIs(d["bool"], boolval)
self.assertIs(d["class"], classval)
self.assertIs(d["func"], funcval)
def test_compile(self):
# get type object for compiled regex
retype = type(re.compile("a+"))
d = ReDict()
d["a"] = 1
d["b"] = 2
d["c"] = 3
self.assertFalse(d.compiled)
d.compile()
self.assertTrue(len(d.compiled) > 0)
for c in d.compiled:
self.assertTrue(isinstance(c, retype))
def test_groups(self):
d = ReDict()
num = 8
val1 = "hello"
val2 = "world"
val3 = "!"
expr = "(.*) (.*) (.*)"
d[expr] = num
testinput = "%s %s %s" % (val1, val2, val3)
self.assertEqual(num, d[testinput])
groups = d.groups()
self.assertEqual(groups[0], val1)
self.assertEqual(groups[1], val2)
self.assertEqual(groups[2], val3)
def test_dump_to_dict(self):
testitems, d = self.fill_redict()
dumped = d.dump_to_dict()
for key in dumped:
self.assertTrue(key in testitems)
self.assertEqual(dumped[key], testitems[key])
self.assertEqual(len(testitems), len(dumped))
def test_load_from_dict(self):
testitems = {
"x+": 1,
"y?": 2,
"z*": 3
}
d = ReDict()
for key in testitems:
d[key] = testitems[key]
dumped = d.dump_to_dict()
loaded_redict = ReDict().load_from_dict(dumped)
self.assertEqual(testitems["x+"], loaded_redict["xxxx"])
self.assertEqual(testitems["y?"], loaded_redict["y"])
self.assertEqual(testitems["z*"], loaded_redict["zz"])
def test_pop(self):
d = ReDict()
d["a+"] = 1
d["b+"] = 2
self.assertEqual(2, len(d))
self.assertEqual(d["aaa"], 1)
self.assertEqual(d["bbb"], 2)
self.assertEqual(d.pop("b"), 2)
self.assertEqual(1, len(d))
self.assertEqual(d["aaa"], 1)
self.assertRaises(KeyError, d.__getitem__, "bbb")
def test_items(self):
testitems, d = self.fill_redict()
redict_items = d.items()
self.assertEqual(len(redict_items), len(testitems))
for key, value in redict_items:
self.assertTrue(key in testitems)
self.assertEqual(value, testitems[key])
def test_values(self):
testitems, d = self.fill_redict()
redict_values = d.values()
self.assertEqual(len(redict_values), len(testitems))
for value in redict_values:
self.assertTrue(value in testitems.values())
def test_keys(self):
testitems, d = self.fill_redict()
redict_keys = d.keys()
self.assertEqual(len(redict_keys), len(testitems))
for key in redict_keys:
self.assertTrue(key in testitems)
def test_iteritems(self):
item_count = 0
testitems, d = self.fill_redict()
for key, value in d.iteritems():
self.assertTrue(key in testitems)
self.assertEqual(value, testitems[key])
item_count += 1
self.assertEqual(item_count, len(testitems))
def test_clear(self):
d = ReDict()
testitems = {
"q+": 4,
"r*": 5,
"s?": 6
}
for key, val in testitems.items():
d[key] = val
self.assertEqual(d["qqq"], 4)
self.assertEqual(len(testitems), len(d))
d.clear()
self.assertEqual(0, len(d))
self.assertRaises(KeyError, d.__getitem__, "qqq")
def test_copy(self):
d = ReDict()
testitems = {
"xyz+": 4,
"ab*c": 5,
"def?": 6
}
for key, val in testitems.items():
d[key] = val
d2 = d.copy()
self.assertEqual(len(d), len(d2))
for key, val in d.iteritems():
self.assertTrue(key in d2.keys())
self.assertTrue(val in d2.values())
self.assertEqual(d2["xyz"], d["xyz"])
self.assertEqual(d2["abbbc"], d["abbbc"])
self.assertEqual(d2["def"], d["def"])
def test_update(self):
d1 = ReDict()
d2 = ReDict()
testitems = {
"xyz+": 4,
"ab*c": 5,
"def?": 6
}
updateitems = {
"q+": 1,
"r*": 2,
"s?": 3
}
for key, val in testitems.items():
d1[key] = val
for key, val in updateitems.items():
d2[key] = val
d1.update(d2)
self.assertEqual(len(d1), len(testitems) + len(updateitems))
for key, val in testitems.items():
self.assertTrue(key in d1.keys())
self.assertTrue(val in d1.values())
for key, val in updateitems.items():
self.assertTrue(key in d1.keys())
self.assertTrue(val in d1.values())
def test_delete_items(self):
num_iterations = 50
d = ReDict()
added = {}
deleted = {}
for i in range(num_iterations):
expr = "(bar?|foo*) %d" % i
added[expr] = i
d[expr] = i
# Randomly delete some items
delete_count = random.randrange(20, 30)
for _ in range(delete_count):
key = random.choice(list(added.keys()))
deleted[key] = added[key]
del added[key]
del d[key]
# Verify deleted items are missing
for key, value in d:
if key in added:
self.assertTrue(key in d.keys())
self.assertEqual(value, added[key])
elif key in deleted:
self.assertFalse(key in d.keys())
try:
_ = d[key]
except KeyError:
keyerror = True
else:
keyerror = False
self.assertTrue(keyerror)
else:
raise RuntimeError("Malformed test data")
| StarcoderdataPython |
1774697 | <reponame>nonlinearxwaves/MultiLevelQuantumGates<gh_stars>0
# -*- coding: utf-8 -*-
# Library of classes for various quantum gates
#
# By Claudio
# Initial version september 2018
# Current version 3 february 2019
import numpy as np
import tensorflow as tf
#class for defining various operators
class quantumgates:
def Rz(a,N=2,datatype=np.complex64):
''' Return the 2x2 rotator matrix Rz
Rz(alpha)=[exp(i a/2) 0; 0 exp(-i a /2)]
see PRA 52, 3457 (1995)
'''
if N!=2:
print("error: Rx only returned for N=2")
return
data=np.zeros((N,N),dtype=datatype)
data[0,0]=np.exp(1j*0.5*a)
data[0,1]=0.0
data[1,0]=0.0
data[1,1]=np.exp(-1j*0.5*a)
return data
def Ry(t,N=2,datatype=np.complex64):
''' Return the 2x2 rotator matrix Ry
Ry(t)=[cos(t/2) sin(t/2); -sin(t/2) cos(t/2)]
see PRA 52, 3457 (1995)
'''
if N!=2:
print("error: Ry only returned for N=2")
return
data=np.zeros((N,N),dtype=datatype)
data[0,0]=np.cos(t*0.5)
data[0,1]=np.sin(t*0.5)
data[1,0]=-np.sin(t*0.5)
data[1,1]=np.cos(t*0.5)
return data
def paulix(N=2,datatype=np.complex64):
''' Return the 2x2 Pauli matrix \sigma_x
sigmax=[0 1; 1 0]
see PRA 52, 3457 (1995)
'''
if N!=2:
print("error: sigmax only returned for N=2")
return
data=np.zeros((N,N),dtype=datatype)
data[0,0]=0.0
data[0,1]=1.0
data[1,0]=1.0
data[1,1]=0.0
return data
def Xgate(N,datatype=np.complex64):
        #Return an NxN matrix that represents an X gate for one qudit of size N
# in the input one has the size N and the optional datatype argument
# default for datatype is np.complex64
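        # Minimal sketch of the result (not executed here): for N=3 the matrix is
        #   [[0, 1, 0],
        #    [0, 0, 1],
        #    [1, 0, 0]]
        # i.e. column j is e_{(j-1) mod N}, so Xgate(N) maps the basis vector e_j
        # to e_{(j-1) mod N} (a cyclic shift)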
dataXN=np.zeros((N,N),dtype=datatype)
for ix in range(N):
dataXN[ix,divmod(ix+1,N)[1]]=1.0
return np.asarray(dataXN,dtype=datatype)
def Zgate(N,datatype=np.complex64):
        #Return an NxN matrix that represents a Z gate for one qudit of size N
# in the input one has the size N and the optional datatype argument
# default for datatype is np.complex64
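        # Minimal sketch of what this produces (not executed here): the k-th diagonal
        # entry is exp(1j*pi*k/N), so e.g. Zgate(2) = diag(1, exp(1j*pi/2)) = diag(1, 1j)
        # and Zgate(4) has diagonal (1, exp(1j*pi/4), exp(1j*pi/2), exp(3j*pi/4))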
dataXN=np.zeros((N,N),dtype=np.complex64)
for ix in range(N):
dataXN[ix,ix]=np.complex(np.exp(1j*np.pi*ix/N))
return np.asarray(dataXN,dtype=np.complex64)
def randomU(N,datatype=np.complex64):
#Return a random unitary matrix
# in the input one has the size N and the optional datatype argument
# default for datatype is np.complex64
# Important need to set the randomseed
dataUc=np.zeros([N,N],dtype=datatype)
for ix in range(N):
for iy in range(N):
dataUc[ix,iy]=np.random.random()+1j*np.random.random()
dataU_np=np.asarray(dataUc,dtype=datatype)
        Q , _ = np.linalg.qr(dataU_np) #use QR to generate a unitary matrix
return Q
def randomvector(N,datatype=np.complex64):
#Return a random vector with complex values
data=np.random.random([N,1])+1j*np.random.random([N,1])
return data
def unitarize(M_np):
# Use LQ decomposition and return Q to convert a matrix into a unitary
Q , _ = np.linalg.qr(np.asarray(M_np)) #use LQ to generate a unitary matrix
return Q
def randomuniform(N,datatype=np.complex64):
#Return a random square matrix with uniform numbers
# in the input one has the size N and the optional datatype argument
# default for datatype is np.complex64
# Important need to set the randomseed
dataUc=np.zeros([N,N],dtype=datatype)
for ix in range(N):
for iy in range(N):
dataUc[ix,iy]=np.random.random()+1j*np.random.random()
return np.asarray(dataUc,dtype=datatype)
def randomdiagonal(N,datatype=np.complex64):
        #Return a random diagonal matrix with uniform complex entries on the diagonal
# in the input one has the size N and the optional datatype argument
# default for datatype is np.complex64
# Important need to set the randomseed
dataUc=np.zeros([N,N],dtype=datatype)
for ix in range(N):
dataUc[ix,ix]=np.random.random()+1j*np.random.random()
return np.asarray(dataUc,dtype=datatype)
def projector(N,M,datatype=np.complex64):
#Return a projector, i.e., a matrix with size NxM formed by [IN, 0]
# with IN the identity matrix and all other elements zero
data=np.zeros([N,M],dtype=datatype)
if M<N:
print("Error ! M cannot be smaller than N in quantumgates.projector")
return 0.0
for ix in range(N):
data[ix,ix]=1.0
return data
def riggoperator(X,M,datatype=np.complex64):
# given an operator X with size NxN return a rigged operator with size NxM
# with shape [X | 0 ] with 0 a zeros matrix with size NxM-N
N=X.shape[0]
N1=X.shape[1]
if N!=N1:
print("Error ! X must be a square matrix in quantumgates.riggoperator")
return 0.0
data=np.zeros([N,M],dtype=datatype)
if M<N:
print("Error ! M cannot be smaller than N in quantumgates.riggoperator")
return data
for ix in range(N):
for iy in range(N):
data[ix,iy]=X[ix,iy]
return data
def riggunitary(X,M,datatype=np.complex64):
# given an operator X with size NxN return a rigged operator with size MxM
# with shape [X | 0 ; 0^T | U(M-N)] with 0 a zeros matrix with size NxM-N
# and U a random unitary matrix
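        # Minimal sketch (not executed here), assuming X is the 2x2 Pauli X and M=4:
        #   riggunitary(quantumgates.paulix(), 4) has the block form
        #   [[0, 1, 0,   0  ],
        #    [1, 0, 0,   0  ],
        #    [0, 0, u11, u12],
        #    [0, 0, u21, u22]]
        # where the 2x2 block (u_ij) is a random unitary produced by randomU(2)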
N=X.shape[0]
N1=X.shape[1]
if N!=N1:
print("Error ! X must be a square matrix in quantumgates.riggunitary")
return 0.0
data=np.zeros([M,M],dtype=datatype)
if M<N:
print("Error ! M cannot be smaller than N in quantumgates.riggunitary")
return data
for ix in range(N):
for iy in range(N):
data[ix,iy]=X[ix,iy]
if M>N:
Ureduced=quantumgates.randomU(M-N,datatype)
for ix in range(M-N):
for iy in range(M-N):
data[ix+N,iy+N]=Ureduced[ix,iy]
return data
def riggzero(X,M,datatype=np.complex64):
# given an operator X with size NxN return a rigged operator with size MxM
# with shape [X | 0 ; 0^T | 0C)] with 0 a zeros matrix with size NxM-N
# and 0C a zero matrix of size (M-N x M-N)
N=X.shape[0]
N1=X.shape[1]
if N!=N1:
print("Error ! X must be a square matrix in quantumgates.riggunitary")
return 0.0
data=np.zeros([M,M],dtype=datatype)
if M<N:
print("Error ! M cannot be smaller than N in quantumgates.riggunitary")
return data
for ix in range(N):
for iy in range(N):
data[ix,iy]=X[ix,iy]
if M>N:
for ix in range(M-N):
for iy in range(M-N):
data[ix+N,iy+N]=0.0
return data
def riggidentity(X,datatype=np.complex64):
# given an operator X with size NxN return a N+1xN+1 operator
# with structure [1 | 0; 0 | X] i.e. embed in a identiy matrix
X=np.matrix(X)
(N,M)=X.shape
if M!=N:
print("Error: quantumgates.riggidentity only works with square matrix")
return 0.0
N1=N+1
I1=np.eye(N1,dtype=datatype)
for ix in range(N):
for iy in range(N):
I1[ix+1,iy+1]=X[ix,iy]
return I1
def multiriggidentity(X,N,datatype=np.complex64):
        # given an operator X, rigg it repeatedly (as in riggidentity) until
        # its dimension reaches N
X=np.matrix(X)
(NX,MX)=X.shape
if MX!=NX:
print("Error: quantumgates.multiriggidentity only works with square matrix")
return 0.0
if N<MX:
print("Warning: quantumgates.multiriggidentity, operator has size greater than N")
return X
tildeX=X
for count in range(N-NX):
tildeX=quantumgates.riggidentity(tildeX)
return tildeX
def schwinger(c,datatype=np.complex64):
# return a unitary operator built with the schwinger basis P^m Q^n
        # as a linear combination sum_{m,n} c[m,n] P^n Q^m
# M, N is the size of the input coefficient matrix c
(M,N)=c.shape
if M!=N:
print("Error: quantumgates.schwinger only works with square matrix")
return 0.0
U=np.zeros([M,M],datatype)
P=quantumgates.Xgate(N,datatype)
Q=quantumgates.Zgate(N,datatype)
E=np.identity(N,datatype)
xQ=E
for ix in range(M):
xPQ=xQ
for iy in range(N):
U=U+c[ix,iy]*xPQ
xPQ=np.matmul(P,xPQ)
xQ=np.matmul(Q,xQ)
return U, P, Q
def isunitary(U):
#return true if matrix U is unitary
(M,N)=U.shape
if M!=N:
print("quantumgates.isunitary: matrix is not square")
return False
U1=np.matrix(U)
identity=np.matmul(U1.getH(),U1)
output=False
if np.round(np.trace(identity),0)==M:
output=True
return output
def Householderreflection(v,datatype=np.complex64):
        # Given a complex vector, generate a Householder reflection
# References Ozlov notes, Mezzadri arXiv:math-ph/0609050
#
        # This operator H(v) is such that H(v).v = ||v|| e1
#
# Version 2 november 2018, by claudio
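        # Minimal usage sketch (not executed here):
        #   v = quantumgates.randomvector(4)
        #   H = quantumgates.Householderreflection(v)
        #   np.matmul(H, v)   # ~ [norm(v), 0, 0, 0]^T up to numerical precision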
v=np.matrix(v)
(N,M)=v.shape
if M!=1:
print("quantumgates.Householderreflection: v must be column")
return 0.0
# extract firs element of v
v1=np.asscalar(v[0])
theta=np.asscalar(np.angle(v1))
expitheta=np.exp(1j*theta)
normv1=np.linalg.norm(v)
# build unitary vector in direction 1
e1=np.zeros([N,1],dtype=datatype)
e1[0]=1.0
# build u vector
u=v+expitheta*normv1*e1
u=u/np.linalg.norm(u)
# build matrix
H=np.eye(N,dtype=datatype)-2*np.matmul(u,u.getH())
H=-np.exp(-1j*theta)*H
return H
# def beamsplitter(N,p,q,omega,phi,datatype=np.complex64):
# # return the beam splitter matrix in the subspace of a matrix N\times NxN
# # for the general decomposition of a Unitary operator in beam splitters
# # See Reck et al, PRL 73, 58 (1994
# #
# # Remark the index goes from 0 to N-1
## T=np.eye(N,dtype=datatype)
## T[p][p]=np.exp(np.1j*phi)*np.sin(omega)
## T[p][q]=np.exp(np.1j*phi)*np.cos(omega)
## T[q][p]=np.cos(omega)
## T[q][q]=-np.sin(omega)
# return T
#
#%% class for useful output operations with tensorflow
class utilities:
def printonscreen(VT):
#print a tensor a matrix on the screen with a given precision
VTnp=VT.numpy()
N=VTnp.shape[0]
M=VTnp.shape[1]
for ix in range(N):
for iy in range(M):
re=np.real(VTnp[ix,iy])
im=np.imag(VTnp[ix,iy])
print('{:+02.1f}{:+02.1f}i'.format(re,im),end=" ")
print("") #print endlie
def printonscreennp(VTnp):
#print a tensor a matrix on the screen with a given precision
N=VTnp.shape[0]
M=VTnp.shape[1]
for ix in range(N):
for iy in range(M):
re=np.real(VTnp[ix,iy])
im=np.imag(VTnp[ix,iy])
print('{:+02.1f}{:+02.1f}i'.format(re,im),end=" ")
print("") #print endlie
#%% class for training quantum gates
class quantumgatesinference:
def trainrandom(X_np,M,
verbose=2,
inputaccuracy=1e-4,
ntrain=100,
nvalid=50):
        # Given a gate X with size N, generate a random unitary matrix U (the medium)
        # and use a NN to train an input matrix W such that U @ W acts as the gate
        #
        # Input:
        # X_np, gate as numpy matrix
        # M, size of the embedding space
        # verbose, 0 no output, 1 minimal, 2 all
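        #
        # Minimal usage sketch (not executed here; the module file name is assumed
        # to be utilitiesquantumgates, as in the imports below):
        #   X = quantumgates.Xgate(2)
        #   out, Wfinal, Tfinal, Tinitial = quantumgatesinference.trainrandom(X, M=4, verbose=1)
        # which trains the input matrix W so that (random U) @ W reproduces the rigged X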
        #%% various imports here
        ###### TO BE FINISHED !!!!!!!!!
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
#%% datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
        tfrealdatatype=tf.float32 # to use double precision switch the types above to complex128/float64
#%% number of training points
# ntrain=100 # training set
# nvalid=50 # validation set
#%% epochs
epochs=100 # maximal number of epochs
display_steps=2 # number of steps between each validations
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% generate the tf tensor for the input gate
#XT=tf.constant(X_np,dtype=tfdatatype)
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
dataU_np=quantumgates.randomU(M,npdatatype)
U=tf.constant(dataU_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random_uniform([M,M],dtype=tfrealdatatype),tf.random_uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(M):
xtrains[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize training set
xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
        xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize validation set
xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% projector that extract the first N rows from a vector M
#project=tf.constant(quantumgates.projector(N,M,npdatatype),dtype=tfdatatype)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
equation=tf.matmul(transfer_matrix,x)-yt
eqreal=tf.real(equation)
eqimag=tf.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
                # messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>1:
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
return out, Wfinal, Tfinal, Tinitial
#%%%%
def traincomplex(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
ntrain=100,
nvalid=50):
        # Given a gate X with size N and a complex system described by an MxM unitary
        # transfer matrix U_np, use a NN to train an input gate so that the overall
        # system acts as the target gate
#
# The input gate is only a phase gate, described by a diagonal matrix
# with diagonal exp(i phi1), exp(i phi2), ..., exp(i phin)
#
        # where phi1, phi2, ..., phin are trainable
#
# TO DO, make batch training (not use it can train without batch)
#
# Date: 5 April 2019, by Claudio
#
# Input:
# X_np, gate as numpy matrix
# U_np, complex system unitary matrix (not checked if unitary) a numpy matrix
# verbose, 0 no output, 1 minimal, 2 all
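        #
        # Minimal usage sketch (not executed here):
        #   X = quantumgates.Xgate(2)
        #   U = quantumgates.randomU(4)   # stand-in for the measured medium matrix
        #   out, Wfinal, Tfinal, Tinitial = quantumgatesinference.traincomplex(X, U, verbose=1)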
        #%% various imports here
        ###### TO BE FINISHED !!!!!!!!!
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
#%% datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
        tfrealdatatype=tf.float32 # to use double precision switch the types above to complex128/float64
#%% number of training points
# ntrain=100 # training set
# nvalid=50 # validation set
#%% epochs
epochs=100 # maximal number of epochs
display_steps=2 # number of steps between each validations
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% generate the tf tensor for the input gate
#XT=tf.constant(X_np,dtype=tfdatatype)
#Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random_uniform([M,M],dtype=tfrealdatatype),tf.random_uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(M):
xtrains[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize training set
xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
        xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize validation set
xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% projector that extract the first N rows from a vector M
#project=tf.constant(quantumgates.projector(N,M,npdatatype),dtype=tfdatatype)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
equation=tf.matmul(transfer_matrix,x)-yt
eqreal=tf.real(equation)
eqimag=tf.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
                # messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>1:
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=N
out['M']=M
out['X']=X_np
out['U']=U_np
return out, Wfinal, Tfinal, Tinitial
#%% class for training SLM with single input
class SLM:
def trainSLMsingleinputquantized(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100,
realMIN=-1.0, realMAX=1.0,
imagMIN=0.0, imagMAX=0.0,
quantizedbits=8):
        # Given a gate X with size N and a unitary transfer matrix U_np of the medium
        # (with size MxM), use a NN to train an SLM input matrix so that the overall
        # system acts as the gate
        #
        # Input:
        # X_np, gate as numpy matrix
        # U_np, unitary transfer matrix of the medium (its size fixes the embedding dimension M)
# verbose, 0 no output, 1 minimal, 2 steps, 3 all
#
# Use single input SLM
#
        # realMAX, realMIN, maximal and minimal value for Wreal
        #
        # imagMAX, imagMIN, maximal and minimal value for Wimag (if both are 0 the weights are real)
#
# quantized bits
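        #
        # Minimal usage sketch (not executed here):
        #   X = quantumgates.Xgate(2)
        #   U = quantumgates.randomU(4)
        #   out, Wfinal, Tfinal, Tinitial = SLM.trainSLMsingleinputquantized(
        #       X, U, epochs=1000, quantizedbits=8)
        # The trained W below is phase-only (cos + 1j*sin of Wreal), with Wreal quantized
        # to the requested number of bits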
        #%% various imports here
        ###### TO BE FINISHED !!!!!!!!!
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
#%% datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
        tfrealdatatype=tf.float32 # to use double precision switch the types above to complex128/float64
#%% number of training points
ntrain=1
nvalid=1
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
if M<N:
print("Error: embedding dimension M cannot be smaller then N")
return
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
print(RXT)
#%% unitary rigging of X
# XT=tf.constant(X_np)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random.uniform([M,M],dtype=tfrealdatatype),tf.random.uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(M):
xtrains[i,j]=1.0
#%% normalize training set
xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
        xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=1.0
#%% normalize validation set
xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% projector that extract the first N rows from a vector M
#project=tf.constant(quantumgates.projector(N,M,npdatatype),dtype=tfdatatype)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
            #clipping the weights
Wreal=tf.clip_by_value(Wreal,realMIN,realMAX)
Wimag=tf.clip_by_value(Wimag,imagMIN,imagMAX)
# quantize
Wreal=tf.quantization.quantize_and_dequantize(Wreal,realMIN,realMAX,signed_input=False,num_bits=quantizedbits)
Wimag=tf.quantization.quantize_and_dequantize(Wimag,imagMIN,imagMAX,signed_input=False,num_bits=quantizedbits)
# build the matrices (phase only modulator)
#W=tf.complex(cWreal,cWimag)
W=tf.complex(tf.cos(Wreal),tf.sin(Wreal))
transfer_matrix=tf.matmul(U,W)
equation=tf.matmul(transfer_matrix,x)-yt
eqreal=tf.math.real(equation)
eqimag=tf.math.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' N ' + repr(N) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
                # messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
rWfinal=Wreal.eval()
iWfinal=Wimag.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>2:
print("Final Wreal")
utilities.printonscreennp(rWfinal)
print("Final Wimag")
utilities.printonscreennp(iWfinal)
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
return out, Wfinal, Tfinal, Tinitial
def complexqtzd(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100,
realMIN=-1.0, realMAX=1.0,
imagMIN=0.0, imagMAX=0.0,
nbits=8):
#%% Train a single input SLM with complex matrix
            # Given a gate X with size N and a unitary transfer matrix U_np of the medium,
            # use a NN to train an SLM input matrix so that the overall system acts as the gate
#
# Input:
# X_np, gate as numpy matrix
# U_np, unitary matrix for medium
# verbose, 0 no output, 1 minimal, 2 steps, 3 all
#
# Use single input SLM with complex matrix
#
            # realMAX, realMIN, maximal and minimal value for Wreal
            #
            # imagMAX, imagMIN, maximal and minimal value for Wimag
            # If imagMAX=imagMIN=0 this is an amplitude-only modulator
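            #
            # Minimal usage sketch (not executed here):
            #   X = quantumgates.Xgate(2)
            #   U = quantumgates.randomU(4)
            #   out = SLM.complexqtzd(X, U, epochs=1000, nbits=8,
            #                         imagMIN=0.0, imagMAX=0.0)  # amplitude-only modulation
            # The returned dictionary also carries Wreal, Wimag, Wfinal, Tfinal, yt, y and cost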
#%%
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
# datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
            tfrealdatatype=tf.float32 # to use double precision switch the types above to complex128/float64
#%% number of training points
ntrain=1
nvalid=1
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
if M<N:
print("Error: embedding dimension M cannot be smaller then N")
return
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random.uniform([M,M],dtype=tfrealdatatype),tf.random.uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% current output
yC=tf.complex(tf.random.uniform([M,1],dtype=tfrealdatatype),tf.random.uniform([M,1],dtype=tfrealdatatype))
yout=tf.get_variable("current_y",initializer=yC,trainable=False)
yt=tf.get_variable("target_y",initializer=yC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set, one single input all 1 to N, M-N zeros
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(N):
xtrains[i,j]=1.0
#%% normalize training set
#xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set (here equal to the training)
            xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(N):
xvalids[i,j]=1.0
#%% normalize validation set
#xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
                #clipping the weights
Wreal=tf.clip_by_value(Wreal,realMIN,realMAX)
Wimag=tf.clip_by_value(Wimag,imagMIN,imagMAX)
# quantize
Wreal=tf.quantization.quantize_and_dequantize(Wreal,realMIN,realMAX,signed_input=False,num_bits=nbits)
Wimag=tf.quantization.quantize_and_dequantize(Wimag,imagMIN,imagMAX,signed_input=False,num_bits=nbits)
# build the matrices (phase only modulator)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
yout=tf.matmul(transfer_matrix,x)
equation=yout-yt
eqreal=tf.math.real(equation)
eqimag=tf.math.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' N ' + repr(N) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
                    # messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
ytargetf=sess.run(yt, feed_dict={x: xtmp})
youtf=sess.run(yout, feed_dict={x: xtmp})
rWfinal=Wreal.eval()
iWfinal=Wimag.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>2:
print("Final Wreal")
utilities.printonscreennp(rWfinal)
print("Final Wimag")
utilities.printonscreennp(iWfinal)
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
out['xtrain']=xtrains
out['Wreal']=rWfinal
out['Wimag']=iWfinal
out['Wfinal']=Wfinal
out['Tfinal']=Tfinal
out['yt']=ytargetf
out['y']=youtf
out['cost']=avg_cost_valid
return out
def complex(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100,
realMIN=-1.0, realMAX=1.0,
imagMIN=0.0, imagMAX=0.0):
#%% Train a single input SLM with complex matrix
            # Given a gate X with size N and a unitary transfer matrix U_np of the medium,
            # use a NN to train an SLM input matrix so that the overall system acts as the gate
#
# Input:
# X_np, gate as numpy matrix
# U_np, unitary matrix for medium
# verbose, 0 no output, 1 minimal, 2 steps, 3 all
#
# Use single input SLM with complex matrix
#
            # realMAX, realMIN, maximal and minimal value for Wreal
            #
            # imagMAX, imagMIN, maximal and minimal value for Wimag
            # If imagMAX=imagMIN=0 this is an amplitude-only modulator
#%%
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
# datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
            tfrealdatatype=tf.float32 # to use double precision switch the types above to complex128/float64
#%% number of training points
ntrain=1
nvalid=1
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
if M<N:
print("Error: embedding dimension M cannot be smaller then N")
return
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random.uniform([M,M],dtype=tfrealdatatype),tf.random.uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% current output
yC=tf.complex(tf.random.uniform([M,1],dtype=tfrealdatatype),tf.random.uniform([M,1],dtype=tfrealdatatype))
yout=tf.get_variable("current_y",initializer=yC,trainable=False)
yt=tf.get_variable("target_y",initializer=yC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set, one single input all 1 to N, M-N zeros
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(N):
xtrains[i,j]=1.0
#%% normalize training set
#xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set (here equal to the training)
            xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(N):
xvalids[i,j]=xtrains[i,j]
#%% normalize validation set
#xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
                #clipping the weights
Wreal=tf.clip_by_value(Wreal,realMIN,realMAX)
Wimag=tf.clip_by_value(Wimag,imagMIN,imagMAX)
# build the matrices (phase only modulator)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
yout=tf.matmul(transfer_matrix,x)
equation=yout-yt
eqreal=tf.math.real(equation)
eqimag=tf.math.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' N ' + repr(N) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
                    # messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
ytargetf=sess.run(yt, feed_dict={x: xtmp})
youtf=sess.run(yout, feed_dict={x: xtmp})
rWfinal=Wreal.eval()
iWfinal=Wimag.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>2:
print("Final Wreal")
utilities.printonscreennp(rWfinal)
print("Final Wimag")
utilities.printonscreennp(iWfinal)
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
out['xtrain']=xtrains
out['Wreal']=rWfinal
out['Wimag']=iWfinal
out['Wfinal']=Wfinal
out['Tfinal']=Tfinal
out['yt']=ytargetf
out['y']=youtf
out['cost']=avg_cost_valid
return out
def phaseonly(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100):
    #%% Train a single-input SLM with a phase-only matrix
    # Given a gate with size N and an MxM unitary U for the medium,
    # use a NN to train an input SLM W so that U.W acts as the unitary rigging of the gate
    #
    # Input:
    # X_np, gate as numpy matrix
    # U_np, unitary matrix for the medium
    # verbose, 0 no output, 1 minimal, 2 steps, 3 all
    #
    # Uses a single-input SLM with a phase-only matrix (unit-modulus entries)
    #
    # realMAX, realMIN, maximal and minimal values for Wreal (the phase, here [-pi, pi])
    #
    # imagMAX, imagMIN, maximal and minimal values for Wimag (if both are 0 the weight is real)
    #
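    # Usage sketch (illustrative, not part of the original code):
    #   X_np = np.array([[0, 1], [1, 0]], dtype=np.complex64)  # e.g. a Pauli-X gate (N=2)
    #   U_np = an MxM unitary numpy matrix describing the medium, with M >= N
    #   out = phaseonly(X_np, U_np, verbose=2, inputaccuracy=1e-4, epochs=2000)
    #   out['Wfinal'] holds the trained phase-only mask and out['Tfinal'] the transfer matrix U.W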
#%%
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
import math
# datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
    tfrealdatatype=tf.float32 # to use double precision, switch the dtypes above to complex128 and float64
#%% number of training points
ntrain=1
nvalid=1
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
    #%% bounds for weights
realMAX=math.pi
realMIN=-math.pi
imagMAX=0.0
imagMIN=0.0
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph (compatibility for tf 2)
# tf.compat.v1.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
# tf.compat.v1.set_random_seed(RANDOM_SEED) # for tf 2
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
if M<N:
print("Error: embedding dimension M cannot be smaller then N")
return
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random.uniform([M,M],dtype=tfrealdatatype),tf.random.uniform([M,M],dtype=tfrealdatatype))
# Wreal=tf.compat.v1.get_variable("Wr",initializer=W0,dtype=tfrealdatatype) # for tf 2
# Wimag=tf.compat.v1.get_variable("Wi",initializer=W0,dtype=tfrealdatatype) # for tf 2
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% current output
yC=tf.complex(tf.random.uniform([M,1],dtype=tfrealdatatype),tf.random.uniform([M,1],dtype=tfrealdatatype))
yout=tf.get_variable("current_y",initializer=yC,trainable=False)
yt=tf.get_variable("target_y",initializer=yC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
    #%% generate training set
    # a single input of size M: the first N elements are 1, the rest are zeros
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(N):
xtrains[i,j]=1.0
#%% normalize training set
#xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
xvalids=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=xtrains[i,j]
#%% normalize validation set
#xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
        #clipping the weights
Wreal=tf.clip_by_value(Wreal,realMIN,realMAX)
Wimag=tf.clip_by_value(Wimag,imagMIN,imagMAX)
# build the matrices (phase only modulator)
W=tf.complex(tf.cos(Wreal),tf.sin(Wreal))
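        # each entry is cos(Wreal) + i*sin(Wreal) = exp(i*Wreal): unit modulus, so only the phase is trained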
transfer_matrix=tf.matmul(U,W)
yout=tf.matmul(transfer_matrix,x)
equation=yout-yt
eqreal=tf.math.real(equation)
eqimag=tf.math.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
# cost_function)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' N ' + repr(N) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
                train_writer.add_summary(summary,i+epoch*ntrain)
avg_cost=avg_cost/ntrain
            # messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
ytargetf=sess.run(yt, feed_dict={x: xtmp})
youtf=sess.run(yout, feed_dict={x: xtmp})
rWfinal=Wreal.eval()
iWfinal=Wimag.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>2:
print("Final Wreal")
utilities.printonscreennp(rWfinal)
print("Final Wimag")
utilities.printonscreennp(iWfinal)
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
out['xtrain']=xtrains
out['Wfinal']=Wfinal
out['Tfinal']=Tfinal
out['yt']=ytargetf
out['y']=youtf
out['cost']=avg_cost_valid
return out
| StarcoderdataPython |
3342232 | <reponame>abhigyan709/dsalgo<gh_stars>1-10
# a list is Python's built-in array-like type
# it is similar to arrays in other languages
# it stores a series of items in a particular order.
# it allows you to store sets of information in one place,
# whether you have just a few items or millions of items.
# one of the most powerful features in Python
# Defining a list
# Making a List
users = ['val', 'bob', 'mia', 'ron', 'ned']
# accessing the elements
# indexing starts from 0 from L -> r
first_user = users[0] # first element
second_user = users[1] # second element
newest_user = users[-1] # last element
# modifying individual items
users[0] = 'valerie'
users[-2] = 'ronald'
# printing the list:
print("Here is my List: ",users)
# adding elements, appending
users.append('amy') # adding an element to end of list
new_users = []
new_users.append('val')
new_users.append('bob')
new_users.append('mia')
# inserting elements at particular position
new_users.insert(0, 'joe')
new_users.insert(3, 'bea')
print("My new list: ", new_users)
del users[2]
print("Deleted list: ", users)
# removing an item by its value
print(new_users.remove('mia'))
print("After remove: ", new_users)
# popping elements
"""If you want to work with an element that you are removing
from the list, you can 'pop' the element.
If you think of the list as a stack of items,
pop() takes an item off the top of the stack.
By default pop() returns the last element in the list,
but you can also pop elements from any position in the list"""
# pop the last element from a list
most_recent_user = users.pop()
print(most_recent_user)
# pop the first item from a list
first_user = users.pop(0)
print(first_user)
# length of the list
num_users = len(new_users)
print("There are " + str(num_users) + " users in new_list.")
# list sorting
"""The sort() method changes the order of a list permanently.
The sorted() function returns a copy of the list, leaving the original list unchanged.
You can sort the items in a list in alphabetical order or reversed alphabetical order.
"""
# list sorting permanently
users.sort()
users.sort(reverse=True)
# sorting a list temporarily
print(sorted(users))
print(sorted(users, reverse = True))
# reversing the order of a list
users.reverse()
print(users)
# looping through the lists
"""Lists can have millions of items, so python provides an efficient way to loop through all the items in a list.
When you set up a loop, Python pulls each item from the list one at a time
and stores it in a temporary variable, which you provide a name for.
This name should be a singular version of the list name.
The indented block of code makes up the body of the loop, where you can work with each individual item.
Any lines that are not indented run after the loop is completed."""
# print all items and len in the list
for user in users:
print(user, len(user))
# printing a message for each item, and a separate message afterwards
for user in users:
print("Welcome, " + user + "!")
print("Welcome you all!")
# The range() function
"""You can use the range() function to work with a set of numbers efficiently.
The range() function starts at 0 by default, and stops one number below the number passed to it.
You can use the list() function to efficiently generate a large list of numbers.
"""
# printing the numbers from 0 to 1000
for numbers in range(1001): # syntax of range: range(start, stop, step)
print("Number: ", numbers)
# basic statistics on a list:
ages = [25, 26, 28, 19, 18, 17, 35, 36]
sum_ages = sum(ages)
min_ages = min(ages)
max_ages = max(ages)
print("Sum of ages: ", sum_ages)
print("Youngest: ", min_ages)
print("Oldest: ", max_ages)
# slicing a list
"""You can work with any set of elements from a list.
A portion of a list is called a slice.
"""
# getting the 1st 3 elements:
finishers = ['kai', 'abe', 'ada', 'gus', 'zoe']
print("Finishers: ", finishers)
first_3 = finishers[:3]
print("Fisrt 3 finishers: ", first_3)
middle_3 = finishers[1:4]
print("Middile 3 finishers: ", middle_3)
last_3 = finishers[-3:]
print("Last 3 of finishers: ", last_3)
# copy of a list
copy_of_list = finishers[:]
print(copy_of_list)
# list comprehension
"""To use a list comprehension, define an expression for the values you want to store in the list.
Then write a for loop to generate the input values needed to make the list."""
square = []
for x in range(10):
square.append(x**2)
print(square)
squares = list(map(lambda x: x**2, range(10))) # list comprehension
print(squares)
"""another method
list comprehension"""
cubes = [x**3 for x in range(10)]
print(cubes)
"""a list comprehension consists of brackets containing an expression followed by a for clause,
then zero or more for or if clauses.
The result will be a new list resulting from evaluating the expression in the context of the for and if clauses which follow it.
For example: """
my_tuple = [(x, y) for x in [1, 2, 3] for y in [3, 1, 4] if x != y]
print(my_tuple)
my_tuple_2 = [(x, y) for x in [1, 2, 3] for y in [2, 2, 2] if x == y]
print(my_tuple_2)
"""nested list comprehension can be any arbitrary expression, including another list comprehenshion.
Consider the following example of 3 * 4 matrix implemented as a list of 3,
length of 4: """
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
]
new_mat = [[row[i] for row in matrix] for i in range(4)]
print(new_mat)
transposed = []
for i in range(4):
transposed.append([row[i] for row in matrix])
print(transposed)
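# the built-in zip() function gives the same transpose more concisely
zipped = [list(row) for row in zip(*matrix)]
print(zipped)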
"""using a loop to convert a list of names to upper case"""
names = ['kai', 'abe', 'ada', 'gus', 'zoe']
upper_names = []
for name in names:
upper_names.append(name.upper())
print(upper_names)
"""Another Methods"""
names_2 = ['kai', 'abe', 'ada', 'gus', 'zoe']
upper_names_2 = [nam.upper() for nam in names_2]
print(upper_names_2)
# tuples in python
"""A tuple is like a list, but it is immutable.
You can overwrite an entire tuple, but you can't change the individual elements in a tuple.
"""
"""Defining a tuple"""
dimensions = (800, 600)
for dimension in dimensions:
print(dimension)
dimensions = (800, 1080) # tuple overwritten
| StarcoderdataPython |
3328919 | import re
from ..base import Base
class Regex(Base):
_description = 'matching {}'
def _check(self, value):
return bool(re.search(self.value, value))
| StarcoderdataPython |
81667 | x=[2,25,34,56,72,34,54]
val=int(input("Enter the value you want to get searched : "))
for i in x:
if i==val:
print(x.index(i))
break
elif x.index(i)==(len(x)-1) and i!=val:
print("The Val u want to search is not there in the list")
| StarcoderdataPython |
93523 | <filename>lecciones/05/funciones.py
def mensaje(): # Function declaration
print("Estamos aprendiendo Python.")
print("Estamos aprendiendo instrucciones básicas.")
print("Poco a poco iremos avanzando.")
mensaje() # Function call
print("Ejecutando código fuera de función")
mensaje()
| StarcoderdataPython |
3280721 | <filename>django/contrib/flatpages/tests/views.py<gh_stars>1-10
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
class FlatpageViewTests(TestCase):
fixtures = ['sample_flatpages']
urls = 'django.contrib.flatpages.tests.urls'
def setUp(self):
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
if flatpage_middleware_class in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(
os.path.dirname(__file__),
'templates'
),
)
self.old_LOGIN_URL = settings.LOGIN_URL
settings.LOGIN_URL = '/accounts/login/'
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
settings.LOGIN_URL = self.old_LOGIN_URL
def test_view_flatpage(self):
"A flatpage can be served through a view"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEquals(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '<EMAIL>', 's3krit')
self.client.login(username='testuser',password='<PASSWORD>')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled"
response = self.client.get('/flatpage/')
self.assertEquals(response.status_code, 404)
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middlware is disabled"
response = self.client.get('/no_such_flatpage/')
self.assertEquals(response.status_code, 404)
def test_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(1)
response = self.client.get('/flatpage_root/some.very_special~chars-here/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
| StarcoderdataPython |
1649238 |
transformers = []
def register_transformer(transformer):
transformers.append(transformer)
def find_transformer(extension=None, mime_type=None):
if not extension and not mime_type:
raise ValueError("Either extension or mime type should be specified")
info = None
for trans in transformers:
if extension and extension.lower() in trans["extensions"]:
info = trans
if mime_type and mime_type.lower().startswith(trans["mime_types"]):
info = trans
if not info:
return None
return info["class"]
def transformer(type_name, resource, url, query):
"""Get transformation module for resource of given type"""
mime_type = query.get('mimetype')
trans_class = find_transformer(extension=type_name, mime_type=mime_type)
if not trans_class:
trans_class = find_transformer(extension='*')
if not trans_class:
return None
return trans_class(resource, url, query)
class Transformer(object):
"""Data resource transformer - abstract class"""
def __init__(self, flow, url, query):
self.flow = flow
self.url = url
self.query = query
self.open_data = query['handler']
self.max_results = 500
self.mimetype = query.get('mimetype', None)
if "max-results" in query:
try:
self.max_results = int(query.get("max-results", 500))
except:
raise ValueError("max-results should be an integer")
if "audit" in query:
self.audit = True
else:
self.audit = False
def close_stream(self, handle):
if handle and hasattr(handle, 'close'):
handle.close()
def read_source_rows(self, src):
rows = []
record_count = 0
for row in src.rows():
rows.append(row)
record_count += 1
if self.max_results and record_count >= self.max_results:
break
result = {
"fields": src.field_names,
"data": rows
}
if self.max_results:
result["max_results"] = self.max_results
return result
def requires_size_limit(self):
        '''Whether the transformer is subject to the 'limit', due to needing to
        load the whole file into memory to read only a sample of it.
'''
return True
| StarcoderdataPython |
1648452 | <gh_stars>0
import math
import numpy as np
import itk
from pathlib import Path
from typing import List, Optional, Sequence, Union, Callable
# frequently used types
from itk.itkImagePython import itkImageBase2 as Image2
from itk.itkImagePython import itkImageBase3 as Image3
from itk.support.types import ImageLike as AnyImage
from itk.support.types import itkCType
itkImage = Union[Image2, Image3]
ImageOrArray = Union[Image2, Image3, np.ndarray]
_COLLAPSE_STRATEGY_SUBMATRIX = 2
def identity(x: AnyImage) -> AnyImage:
return x
def as_image(x: AnyImage) -> AnyImage:
if isinstance(x, np.ndarray):
return itk.image_view_from_array(x)
return x
def as_array(x: AnyImage) -> np.ndarray:
if isinstance(x, np.ndarray):
return x
return itk.array_from_image(x)
def array_view_reverse_ordering(x: np.ndarray) -> np.ndarray:
return x.transpose(np.flip(np.arange(len(x.shape))))
def imread(filename: Path) -> itkImage:
"""Wrapper around itk.imread to avoid having to convert Path to str"""
return itk.imread(f"{filename}")
def imwrite(image: itkImage, filename: Path, compression: bool = False) -> None:
"""Wrapper around itk.imwrite to avoid having to convert Path to str"""
return itk.imwrite(image, f"{filename}", compression=compression)
def pixeltype(image: itkImage) -> itkCType:
"""Get pixel type"""
return itk.template(image)[1][0]
def make_image(
shape: Sequence[int],
spacing: Optional[Sequence[float]] = None,
value: Union[int, float] = 0,
pixel_type: itkCType = itk.UC,
) -> itkImage:
"""Create (2D/3D) image with specified shape and spacing"""
dim = len(shape)
region = itk.ImageRegion[dim]()
region.SetSize(shape)
region.SetIndex(tuple([0] * dim))
image = itk.Image[pixel_type, dim].New()
image.SetRegions(region)
if spacing:
if len(shape) != len(spacing):
raise ValueError("shape and spacing must have same dimension")
image.SetSpacing(spacing)
image.Allocate()
image[:] = value
return image
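# Example (illustrative): a 64x64 unsigned-char image filled with the value 7 and 0.5 spacing
#   img = make_image((64, 64), spacing=(0.5, 0.5), value=7, pixel_type=itk.UC)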
def extract_slices(img: Image3, axis: int = 2) -> List[Image2]:
"""Get 2D image slices from 3D image
Args:
img (Image3): 3d image
axis (int, optional): Axis perpendicular to slices. Defaults to 2, i.e. XY slices
Returns:
        List[Image2]: list of 2D slices along the given axis
"""
slices = []
size = itk.size(img)
region = itk.region(img)
region.SetSize(axis, 1)
for k in range(size[axis]):
region.SetIndex(axis, k)
slices.append(
itk.extract_image_filter(
img,
extraction_region=region,
direction_collapse_to_strategy=_COLLAPSE_STRATEGY_SUBMATRIX,
)
)
return slices
def scale_to_range(img: AnyImage, vmin: float = 0.0, vmax: float = 255.0) -> AnyImage:
"""Scale numpy itk.Image to fit in range [vmin,vmax]"""
x_view = as_array(img)
x_min, x_max = np.min(x_view), np.max(x_view)
x_view += vmin - x_min
np.multiply(x_view, vmax / (x_max - x_min), out=x_view, casting="unsafe")
np.clip(x_view, a_min=vmin, a_max=vmax, out=x_view)
return img
def resample(img: itkImage, target_spacing: Optional[Sequence] = None) -> itkImage:
"""resample (2D/3D) image to a target spacing
Args:
img (itkImage): input image
target_spacing (Optional[Sequence]): target spacing (defaults to 0.85)
Returns:
itkImage: resampled image
"""
dim = img.GetImageDimension()
interpolator = itk.LinearInterpolateImageFunction.New(img)
transform = itk.IdentityTransform[itk.D, dim].New()
if not target_spacing:
target_spacing = [0.85] * dim
size = itk.size(img)
spacing = itk.spacing(img)
for d in range(dim):
size[d] = math.ceil(size[d] * spacing[d] / target_spacing[d])
spacing[d] = target_spacing[d]
# resample to target resolution
resampled = itk.resample_image_filter(
img,
transform=transform,
interpolator=interpolator,
size=size,
output_spacing=spacing,
output_origin=itk.origin(img),
output_direction=img.GetDirection(),
)
return resampled
def resample_to_ref(img: itkImage, ref: itkImage) -> itkImage:
"""resample (2D/3D) image to a reference grid
Args:
img: input image
ref: reference image
Returns:
itkImage: resampled image
"""
dim = img.GetImageDimension()
interpolator = itk.LinearInterpolateImageFunction.New(img)
transform = itk.IdentityTransform[itk.D, dim].New()
# resample to target resolution
resampled = itk.resample_image_filter(
img,
transform=transform,
interpolator=interpolator,
size=itk.size(ref),
output_spacing=itk.spacing(ref),
output_origin=itk.origin(ref),
output_direction=ref.GetDirection(),
)
return resampled
def pad(
img: AnyImage, target_size: Sequence[int] = (256, 256), value: float = 0
) -> AnyImage:
"""Pad (2D/3D) image to the target size"""
size = itk.size(img)
delta = [t - min(s, t) for s, t in zip(size, target_size)]
if any(delta):
pad_lo = [(d + 1) // 2 for d in delta]
pad_hi = [delta[i] - p for i, p in enumerate(pad_lo)]
img = itk.constant_pad_image_filter(
img,
pad_lower_bound=pad_lo,
pad_upper_bound=pad_hi,
constant=value,
)
return img
def crop_center(img: AnyImage, target_size: Sequence[int] = (256, 256)) -> AnyImage:
"""Crop (2D/3D) image to the target size (centered)"""
size = itk.size(img)
delta = [max(s, t) - t for s, t in zip(size, target_size)]
if any(delta):
crop_low = [(d + 1) // 2 for d in delta]
crop_hi = [delta[i] - p for i, p in enumerate(crop_low)]
img = itk.crop_image_filter(
img,
lower_boundary_crop_size=crop_low,
upper_boundary_crop_size=crop_hi,
direction_collapse_to_strategy=_COLLAPSE_STRATEGY_SUBMATRIX,
)
return img
def crop(
img: AnyImage, target_offset: Sequence[int], target_size: Sequence[int] = (256, 256)
) -> AnyImage:
"""Crop (2D/3D) image to the target size/offset"""
region = itk.region(img)
region.SetIndex(target_offset)
region.SetSize(target_size)
return itk.extract_image_filter(
img,
extraction_region=region,
direction_collapse_to_strategy=_COLLAPSE_STRATEGY_SUBMATRIX,
)
def get_files(
dir: Path, predicate: Callable[[str], bool] = lambda f: f.endswith(".nii.gz")
) -> List[Path]:
"""Collect list of file names filtered by 'predicate'"""
return [f for f in Path(dir).glob("*.*") if predicate(f"{f}")]
| StarcoderdataPython |
3258709 | <filename>Juego del TaTeTi/script.py
# Designed by <NAME> - https://www.github.com/facundopadilla
# This content is free to use and is MIT licensed; therefore, I take no responsibility for damages arising from its use and/or modification.
# @author: facundopadilla
# @linkedin: https://www.linkedin.com/in/facundopadilla/
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from ui_tateti import Ui_MainWindow
import sys
class Tateti(QtWidgets.QMainWindow):
# -- Constructor --
def __init__(self):
        super(Tateti, self).__init__() # initialize and inherit the widgets
        self.tateti = Ui_MainWindow() # create my attribute
        self.tateti.setupUi(self) # load my setupUi
        # -- Variables --
        self.turno = 1 # 1 means it is player 1's turn, 2 means player 2's turn; player 1 always starts
        self.matriz = [[None, None, None],[None, None, None],[None, None, None]] # 3x3 matrix
        self.contador = 0 # used to check whether there is a draw
# --- Clicked buttons ---
        # >>> Tic-tac-toe grid <<<
        self.tateti.btn1.clicked.connect(self.btn1Clicked) # when button 1 is clicked (row 1, column 1)
        self.tateti.btn2.clicked.connect(self.btn2Clicked) # when button 2 is clicked (row 1, column 2)
        self.tateti.btn3.clicked.connect(self.btn3Clicked) # when button 3 is clicked (row 1, column 3)
        self.tateti.btn4.clicked.connect(self.btn4Clicked) # when button 4 is clicked (row 2, column 1)
        self.tateti.btn5.clicked.connect(self.btn5Clicked) # when button 5 is clicked (row 2, column 2)
        self.tateti.btn6.clicked.connect(self.btn6Clicked) # when button 6 is clicked (row 2, column 3)
        self.tateti.btn7.clicked.connect(self.btn7Clicked) # when button 7 is clicked (row 3, column 1)
        self.tateti.btn8.clicked.connect(self.btn8Clicked) # when button 8 is clicked (row 3, column 2)
        self.tateti.btn9.clicked.connect(self.btn9Clicked) # when button 9 is clicked (row 3, column 3)
        # >>> "Start game" button <<<
self.tateti.btnComenzar.clicked.connect(self.comenzar)
    # --- Functions ---
def comenzar(self):
        # -- Clear the X's and O's --
self.tateti.btn1.setText("")
self.tateti.btn2.setText("")
self.tateti.btn3.setText("")
self.tateti.btn4.setText("")
self.tateti.btn5.setText("")
self.tateti.btn6.setText("")
self.tateti.btn7.setText("")
self.tateti.btn8.setText("")
self.tateti.btn9.setText("")
        # -- Clear the matrix --
self.matriz = [[None, None, None],[None, None, None],[None, None, None]]
        # -- Enable the squares --
self.tateti.btn1.setEnabled(True)
self.tateti.btn2.setEnabled(True)
self.tateti.btn3.setEnabled(True)
self.tateti.btn4.setEnabled(True)
self.tateti.btn5.setEnabled(True)
self.tateti.btn6.setEnabled(True)
self.tateti.btn7.setEnabled(True)
self.tateti.btn8.setEnabled(True)
self.tateti.btn9.setEnabled(True)
        # --- Reset the counter ---
self.contador = 0
def validarGanador(self):
        # --- Rows and columns of X ---
        if((self.matriz[0][0] == "x" and self.matriz[0][1] == "x" and self.matriz[0][2] == "x") # row 1 full of X
        or (self.matriz[1][0] == "x" and self.matriz[1][1] == "x" and self.matriz[1][2] == "x") # row 2 full of X
        or (self.matriz[2][0] == "x" and self.matriz[2][1] == "x" and self.matriz[2][2] == "x") # row 3 full of X
        or (self.matriz[0][0] == "x" and self.matriz[1][0] == "x" and self.matriz[2][0] == "x") # column 1 full of X
        or (self.matriz[0][1] == "x" and self.matriz[1][1] == "x" and self.matriz[2][1] == "x") # column 2 full of X
        or (self.matriz[0][2] == "x" and self.matriz[1][2] == "x" and self.matriz[2][2] == "x") # column 3 full of X
        or (self.matriz[0][0] == "x" and self.matriz[1][1] == "x" and self.matriz[2][2] == "x") # left-to-right diagonal of X
        or (self.matriz[0][2] == "x" and self.matriz[1][1] == "x" and self.matriz[2][0] == "x")): # right-to-left diagonal of X
            self.ganador("Ganador: jugador 1") # send the winner
        # -- Rows and columns of O --
        elif((self.matriz[0][0] == "o" and self.matriz[0][1] == "o" and self.matriz[0][2] == "o") # row 1 full of O
        or (self.matriz[1][0] == "o" and self.matriz[1][1] == "o" and self.matriz[1][2] == "o") # row 2 full of O
        or (self.matriz[2][0] == "o" and self.matriz[2][1] == "o" and self.matriz[2][2] == "o") # row 3 full of O
        or (self.matriz[0][0] == "o" and self.matriz[1][0] == "o" and self.matriz[2][0] == "o") # column 1 full of O
        or (self.matriz[0][1] == "o" and self.matriz[1][1] == "o" and self.matriz[2][1] == "o") # column 2 full of O
        or (self.matriz[0][2] == "o" and self.matriz[1][2] == "o" and self.matriz[2][2] == "o") # column 3 full of O
        or (self.matriz[0][0] == "o" and self.matriz[1][1] == "o" and self.matriz[2][2] == "o") # left-to-right diagonal of O
        or (self.matriz[0][2] == "o" and self.matriz[1][1] == "o" and self.matriz[2][0] == "o")): # right-to-left diagonal of O
            self.ganador("Ganador: jugador 2") # send the winner
        # -- If nobody wins --
        elif self.contador == 9:
            self.ganador("Empate, nadie gana.")
def ganador(self, ganador):
        alerta = QMessageBox() # create a message box (QMessageBox)
        alerta.setWindowTitle("Ganador") # set its title
        alerta.setText(f"{ganador}") # show who the winner is
        mostrar_alerta = alerta.exec_() # run it and show it on screen
def btn1Clicked(self):
        if self.turno == 1: # if it is player 1's turn
            self.tateti.btn1.setText("X") # change the button text to an X
            self.matriz[0][0] = "x" # add the X to the matrix
            self.turno = 2 # switch the turn to player 2
        elif self.turno == 2: # if it is player 2's turn
            self.tateti.btn1.setText("O") # change the button text to an O
            self.turno = 1 # switch the turn to player 1
            self.matriz[0][0] = "o" # add the O to the matrix
        self.tateti.btn1.setEnabled(False) # disable the button so it cannot be changed
        self.contador += 1 # increment the counter
        self.validarGanador() # check whether someone has won
def btn2Clicked(self):
if self.turno == 1:
self.tateti.btn2.setText("X")
self.matriz[0][1] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn2.setText("O")
self.matriz[0][1] = "o"
self.turno = 1
self.tateti.btn2.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn3Clicked(self):
if self.turno == 1:
self.tateti.btn3.setText("X")
self.matriz[0][2] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn3.setText("O")
self.matriz[0][2] = "o"
self.turno = 1
self.tateti.btn3.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn4Clicked(self):
if self.turno == 1:
self.tateti.btn4.setText("X")
self.matriz[1][0] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn4.setText("O")
self.matriz[1][0] = "o"
self.turno = 1
self.tateti.btn4.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn5Clicked(self):
if self.turno == 1:
self.tateti.btn5.setText("X")
self.matriz[1][1] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn5.setText("O")
self.matriz[1][1] = "o"
self.turno = 1
self.tateti.btn5.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn6Clicked(self):
if self.turno == 1:
self.tateti.btn6.setText("X")
self.matriz[1][2] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn6.setText("O")
self.matriz[1][2] = "o"
self.turno = 1
self.tateti.btn6.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn7Clicked(self):
if self.turno == 1:
self.tateti.btn7.setText("X")
self.matriz[2][0] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn7.setText("O")
self.matriz[2][0] = "o"
self.turno = 1
self.tateti.btn7.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn8Clicked(self):
if self.turno == 1:
self.tateti.btn8.setText("X")
self.matriz[2][1] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn8.setText("O")
self.matriz[2][1] = "o"
self.turno = 1
self.tateti.btn8.setEnabled(False)
self.contador += 1
self.validarGanador()
def btn9Clicked(self):
if self.turno == 1:
self.tateti.btn9.setText("X")
self.matriz[2][2] = "x"
self.turno = 2
elif self.turno == 2:
self.tateti.btn9.setText("O")
self.matriz[2][2] = "o"
self.turno = 1
self.tateti.btn9.setEnabled(False)
self.contador += 1
self.validarGanador()
# -- App startup --
app = QtWidgets.QApplication([])
application = Tateti()
application.show()
sys.exit(app.exec())
| StarcoderdataPython |
4830477 | <filename>story-pointr-backend/storypointr/apps/pointing/consumers.py
from channels.exceptions import DenyConnection
from apps.pointing.pointingroom import PointingRoom
from protocol.consumers.jsonrpcconsumer import AsyncJsonRpcWebsocketConsumer, JsonRpcException, rpc_method, rpc_notification
from protocol.events.eventmessage import EventMessage
from protocol.events.eventregistry import AsyncChannelGroupEventRegistry
import logging
from .pointingroom import PointingRoom
import uuid
logger = logging.getLogger(__name__)
class Event:
USER_JOINED = "USER_JOINED"
USER_LEFT = "USER_LEFT"
class UserConsumer(AsyncJsonRpcWebsocketConsumer):
async def connect(self):
"""
Connects to the room with the indicated room_code. The username must be unique
to the room that is being joined, otherwise the connection will be denied.
"""
try:
# If no room_code is specified, we will start a new room. Otherwise, join existing one
if 'room_code' in self.scope['url_route']['kwargs']:
self.room_code = self.scope['url_route']['kwargs']['room_code']
else:
self.room_code = uuid.uuid4()
self.username = self.scope['url_route']['kwargs']['username'].strip()
self.events = AsyncChannelGroupEventRegistry(self, self.room_code)
self.pointingRoom = PointingRoom(self.room_code)
self.pointingRoom.add_user(self.username)
await self.accept()
logger.info('User %s joined room %s', self.username, self.room_code)
await self.events.register_group_event(Event.USER_JOINED, self.on_user_joined)
            await self.events.register_group_event(Event.USER_LEFT, self.on_user_left)
await self.events.fire_group_event(Event.USER_JOINED)
except Exception as ex:
logger.info('%s - Denying connection.', str(ex))
raise DenyConnection from ex
async def disconnect(self, close_code):
"""
Disconnects from the room and channel that the user is part of.
"""
logger.info('User %s leaving room %s', self.username, self.room_code)
self.pointingRoom.remove_user(self.username)
await self.events.fire_group_event(Event.USER_LEFT)
await self.events.unregister_group_events()
async def on_user_joined(self, username=None, **kwargs):
"""
Called when a user joins the room.
Args:
username {str} -- The username of the user that joined.
"""
logger.info('User %s.%s received user_joined event', self.room_code, self.username)
await self.send_json(EventMessage('user_joined', username=username))
async def on_user_left(self, username=None, **kwargs):
"""
Called when a user leaves the room.
Args:
username {str} -- The username of the user that left.
"""
logger.info('User %s.%s received user_left event', self.room_code, self.username)
await self.send_json(EventMessage('user_left', username=username))
@rpc_method
async def get_room_code(self):
"""Gets the code for the current room."""
return dict(room_code=self.pointingRoom.room_code)
@rpc_method
async def get_users(self):
"""Gets the list of users currently active within the room."""
return dict(users=self.pointingRoom.get_users())
@rpc_method
async def update_point(self, point):
"""Updates the current users point."""
self.pointingRoom.update_point(self.username, point)
| StarcoderdataPython |
3388001 | <reponame>Peilonrayz/typing_inspect_lib<gh_stars>1-10
import typing
from .helpers import PY_OLD, typing_
# TODO: reduce complexity
if PY_OLD: # noqa: MC0001
def get_base_type(type_): # pylint: disable=too-many-return-statements
"""Get the generic type and if it is unwrapped."""
ret_type = None
if isinstance(type_, typing._ProtocolMeta):
ret_type = typing_.BaseProtocol
elif isinstance(type_, typing_.ProtocolMeta):
ret_type = typing_.Protocol
elif isinstance(type_, typing.GenericMeta):
ret_type = typing.Generic
if ret_type is not None:
if type_.__origin__ is None:
return ret_type, True
else:
return ret_type, False
return None, None
else:
def get_base_type(type_): # pylint: disable=too-many-return-statements
if isinstance(type_, typing._ProtocolMeta):
return typing_.BaseProtocol, True
if isinstance(type_, typing_.ProtocolMeta):
return typing_.Protocol, True
if isinstance(type_, typing._GenericAlias):
if isinstance(type_.__origin__, typing._ProtocolMeta):
return typing_.BaseProtocol, False
elif isinstance(type_.__origin__, typing_.ProtocolMeta):
return typing_.Protocol, False
elif getattr(type_, '_special', False):
return typing.Generic, True
else:
return typing.Generic, False
if hasattr(type_, '__orig_bases__') and typing.Generic in type_.__mro__:
return typing.Generic, True
return None, None
| StarcoderdataPython |
3288265 | <filename>src/clusto/test/drivers/resourcemanagers/ipmanagertest.py
import clusto
from clusto.test import testbase
from clusto.drivers import IPManager, BasicServer, ResourceTypeException, ResourceException
import IPy
class IPManagerTest(testbase.ClustoTestBase):
def data(self):
ip1 = IPManager('a1', gateway='192.168.1.1', netmask='255.255.255.0',
baseip='192.168.1.0')
ip2 = IPManager('b1', gateway='10.0.128.1', netmask='255.255.252.0',
baseip='10.0.128.0')
ip3 = IPManager('c1', gateway='172.16.40.0', netmask='255.255.255.0',
baseip='172.16.40.0')
ip4 = IPManager('c2', gateway='172.16.0.0', netmask='255.255.0.0',
baseip='172.16.0.0')
s = BasicServer('s1')
def testBadIPAllocation(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
self.assertRaises(ResourceTypeException, ip1.allocate, s1, '10.2.3.4')
def testNewIP(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
num = 50
for i in range(num):
ip1.allocate(s1)
self.assertEqual(ip1.count, num)
self.assertEqual(len(ip1.resources(s1)), num)
self.assertEqual(ip1.owners('192.168.1.' + str(num+1)), [s1])
def testGetIPManager(self):
ip1, ip2 = map(clusto.get_by_name, ['a1', 'b1'])
self.assertEqual(ip1, IPManager.get_ip_manager('192.168.1.23'))
self.assertEqual(ip2, IPManager.get_ip_manager('10.0.129.22'))
def testGetIPManagers(self):
ip3, ip4 = map(clusto.get_by_name, ['c1', 'c2'])
self.assertEqual([ip3, ip4], IPManager.get_ip_managers('172.16.40.2'))
self.assertEqual([ip4], IPManager.get_ip_managers('172.16.0.2'))
self.assertEqual([], IPManager.get_ip_managers('192.168.40.1'))
def testGetIP(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
ip1.allocate(s1)
ip2.allocate(s1)
self.assertEqual(sorted(IPManager.get_ips(s1)),
sorted(['192.168.1.2', '10.0.128.2']))
def testReserveIP(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
ip2.allocate(ip2, '10.0.128.4')
self.assertRaises(ResourceException, ip2.allocate, s1, '10.0.128.4')
def testAdditionalAttrs(self):
ip1, ip2, s1 = map(clusto.get_by_name, ['a1', 'b1', 's1'])
ip1.allocate(s1, '192.168.1.20')
self.assertEqual(str(s1.attr_value(key='ip', subkey='ipstring')), '192.168.1.20')
self.assertEqual(str(s1.attr_value(key='ip', subkey='cidr')), '192.168.1.20/24')
| StarcoderdataPython |
1741756 | <filename>reactivex/internal/exceptions.py
# Rx Exceptions
from typing import Optional
class SequenceContainsNoElementsError(Exception):
def __init__(self, msg: Optional[str] = None):
super().__init__(msg or "Sequence contains no elements")
class ArgumentOutOfRangeException(ValueError):
def __init__(self, msg: Optional[str] = None):
super(ArgumentOutOfRangeException, self).__init__(
msg or "Argument out of range"
)
class DisposedException(Exception):
def __init__(self, msg: Optional[str] = None):
super().__init__(msg or "Object has been disposed")
class ReEntracyException(Exception):
def __init__(self, msg: Optional[str] = None):
super().__init__(msg or "Re-entrancy detected")
class CompletedException(Exception):
def __init__(self, msg: Optional[str] = None):
super().__init__(msg or "Observer completed")
class WouldBlockException(Exception):
def __init__(self, msg: Optional[str] = None):
super().__init__(msg or "Would block")
| StarcoderdataPython |
3393063 | <filename>featherembed.py<gh_stars>1-10
#!/usr/bin/env python
"""
Usage:
python featherembed.py --listcolumns /fh/fast/gilbert_p/grp/compass_hvtn602_aw/tmpdata/flow-data-2of9-responses.feather
python featherembed.py --columns CCR6,CCR7,CD154,CD45RA,CXCR3,GzA,HLA-DR,IFNg,IL13/4,IL17a,IL2,IL22,KLRG1,Perforin,TNFa /fh/fast/gilbert_p/grp/compass_hvtn602_aw/tmpdata/flow-data-2of9-responses.feather
"""
n_neighbors_help = """This parameter controls how UMAP balances local versus global
structure in the data. It does this by constraining the size of the
local neighborhood UMAP will look at when attempting to learn the
manifold structure of the data. This means that low values of
n_neighbors will force UMAP to concentrate on very local structure
(potentially to the detriment of the big picture), while large values
will push UMAP to look at larger neighborhoods of each point when
estimating the manifold structure of the data, losing fine detail
structure for the sake of getting a broader view of the data."""
min_dist_help = """The parameter controls how tightly UMAP is allowed to pack points
together. It, quite literally, provides the minimum distance apart
that points are allowed to be in the low dimensional representation.
This means that low values of min_dist will result in clumpier
embeddings. This can be useful if you are interested in clustering, or
in finer topological structure. Larger values of min_dist will prevent
UMAP from packing point together and will focus instead on the
preservation of the broad topological structure instead."""
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='UMAP for dimensionality reduction of a matrix stored in feather format.')
parser.add_argument('filename', type=str,
help='Path to the feather file.',
default='/fh/fast/gilbert_p/grp/compass_hvtn602_aw/tmpdata')
parser.add_argument('--metric', type=str,
help='A scipy distance metric, e.g. correlation, euclidean, manhattan',
default='correlation')
parser.add_argument('--out', type=str,
                        help='Output filename.',
default='xxxx_out.feather')
parser.add_argument('--n_neighbors', type=int,
help=n_neighbors_help,
default=20)
parser.add_argument('--min_dist', type=float,
help=min_dist_help,
default=0.5)
parser.add_argument('--columns', type=str,
                        help='Comma-separated list of columns to consider as input dimensions',
default='ALL')
parser.add_argument('--listcolumns', action='store_true', help='List the columns in the input feather file.')
args = parser.parse_args()
import numpy as np
import pandas as pd
import umap
import feather
import sys
if args.columns != 'ALL':
cols = args.columns.split(',')
# cols = [c for c in cols if c in fDf.columns]
# fDf = fDf[cols]
else:
cols = None
fDf = feather.read_dataframe(args.filename, columns=cols)
if args.listcolumns:
print(','.join(fDf.columns))
print('Rows: %d' % fDf.shape[0])
else:
umapObj = umap.UMAP(n_components=2, metric=args.metric, n_neighbors=args.n_neighbors, min_dist=args.min_dist)
xy = umapObj.fit_transform(fDf.values)
assert xy.shape[0] == fDf.shape[0]
xyDf = pd.DataFrame(xy, index=fDf.index, columns=['X', 'Y'])
if args.out == 'xxxx_out.feather':
args.out = args.filename[:-len('.feather')] + '_out' + '.feather'
feather.write_dataframe(xyDf, args.out)
print('Successfully applied UMAP: %s' % args.out) | StarcoderdataPython |
1659716 | #!/usr/bin/env python
import argparse
import anydbm
import sys
import BaseHTTPServer
import SocketServer
class MetricRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
db_filename = None
def do_GET(self):
database = anydbm.open(self.db_filename)
output = ['%s %s' % (k, database[k]) for k in database.keys()]
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
print >>self.wfile, '\n'.join(output)
def parse_args(argv):
parser = argparse.ArgumentParser(
description='Output the metrics stored in a dbm file')
parser.add_argument(
'--db_file',
metavar='DB_FILE',
required=True,
help='The DBM file to read the metrics from')
parser.add_argument(
'--port',
metavar='PORT',
type=int,
default=8080,
help='The port on which the server should run')
return parser.parse_args(argv)
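# Example invocation (illustrative; substitute the actual script name and paths):
#   python metrics_server.py --db_file /var/lib/metrics.db --port 8080
# A GET request to http://localhost:8080/ then returns one "key value" line per metric.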
def main(argv):
args = parse_args(argv[1:])
MetricRequestHandler.db_filename = args.db_file
httpd = SocketServer.TCPServer(("", args.port), MetricRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
main(sys.argv)
| StarcoderdataPython |
3309404 | from gameai.core import Algorithm
class Minimax(Algorithm):
'''
Implementation of the minimax algorithm.
Attributes:
horizon (int): The max depth of the search. Defaults to infinity. Note that if this
            is set then the game's heuristic is used
'''
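    # Assumed Game interface for 'g' (inferred from the calls below, not defined in this file):
    #   g.action_space(s)      -> list of legal actions in state s
    #   g.next_state(s, a, p)  -> state reached when player p plays action a in s
    #   g.terminal(s)          -> True when s is a terminal state
    #   g.reward(s, p)         -> numeric reward of s from player p's perspective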
def __init__(self, horizon=float('inf')):
self.horizon = horizon
def best_action(self, g, s, p):
actions = g.action_space(s)
rewards = [self.min_play(g, g.next_state(s, a, p), 1-p, 0)
for a in actions]
return actions[rewards.index(max(rewards))]
def min_play(self, g, s, p, depth):
'''
Get the smallest value of all the child nodes
Args:
g (Game): The game
s (any): The state of the game upon execution
p (int): The current player (who is about to make a move)
depth (int): The current depth of the search tree
Returns:
int: The smallest value of all the child states
'''
actions = g.action_space(s)
if g.terminal(s) or depth > self.horizon:
return g.reward(s, 1-p)
return min([self.max_play(g, g.next_state(s, a, p), 1-p, depth+1) for a in actions])
def max_play(self, g, s, p, depth):
'''
Get the largest value of all the child nodes
Args:
g (Game): The game
s (any): The state of the game upon execution
p (int): The current player (who is about to make a move)
depth (int): The current depth of the search tree
Returns:
int: The largest value of all the child states
'''
actions = g.action_space(s)
if g.terminal(s) or depth > self.horizon:
return g.reward(s, p)
return max([self.min_play(g, g.next_state(s, a, p), 1-p, depth+1) for a in actions])
| StarcoderdataPython |
1716562 | <reponame>AngelinNjakasoa/Range-analysis-experiments<filename>main.py
#!/usr/bin/python2.7
# coding: utf8
"""
Test the Range Analysis on a python source code
"""
import sys
import ast
import visitor_iteration
def main():
"""
    Takes a python source code file as argument and performs
a range analysis
"""
if len(sys.argv) == 2:
source_file = open(sys.argv[1]).read()
ast_node = ast.parse(source_file)
visitor = visitor_iteration.VisitorRangeAbstract()
try:
visitor.visit(ast_node)
except Exception:
print "Invalid source code"
else:
sys.exit(1)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4830312 | <reponame>tim-fiola/nautobot-plugin-version-control
""" This is the main module that contains the code for the Dolt backed Version Control plugin. """
try:
from importlib import metadata
except ImportError:
# Python version < 3.8
import importlib_metadata as metadata
from django.db.models.signals import pre_migrate, post_migrate
import django_tables2
from nautobot.extras.plugins import PluginConfig
from nautobot_version_control.migrations import auto_dolt_commit_migration
__version__ = metadata.version(__name__)
class NautobotVersionControl(PluginConfig):
"""NautobotVersionControl initializes the dolt configs, middleware, and sets up migrations."""
name = "nautobot_version_control"
verbose_name = "Nautobot Version Control"
description = "Nautobot Version Control with Dolt"
base_url = "version-control"
version = __version__
author = "Network to Code, LLC"
author_email = "<EMAIL>"
min_version = "1.2.0-beta.1"
max_version = "1.999"
required_settings = []
default_settings = {
# TODO: are these respected?
# this is also set in /development/nautobot_config.py
"DATABASE_ROUTERS": [
"nautobot_version_control.routers.GlobalStateRouter",
],
"SESSION_ENGINE": "django.contrib.sessions.backends.signed_cookies",
"CACHEOPS_ENABLED": False,
}
middleware = [
"nautobot_version_control.middleware.dolt_health_check_intercept_middleware",
"nautobot_version_control.middleware.DoltBranchMiddleware",
"nautobot_version_control.middleware.DoltAutoCommitMiddleware",
]
def ready(self):
super().ready()
# disable the GlobalStateRouter during migrations.
pre_migrate.connect(switch_global_router_off, sender=self)
post_migrate.connect(switch_global_router_on, sender=self)
# make a Dolt commit to save database migrations.
post_migrate.connect(auto_dolt_commit_migration, sender=self)
config = NautobotVersionControl # pylint: disable=C0103
def query_registry(model, registry):
"""Performs a lookup on a content type registry.
Args:
model: a Django model class
registry: a python dictionary like
```
{
"my_app_label": True,
"my_other_model": {
"my_model": True,
},
}
```
The type of `<value>` is specific to each
registry. A return value of `None` signals
that nothing is registered for that `model`.
"""
app_label = model._meta.app_label
model = model.__name__.lower()
if app_label not in registry:
return None
if not isinstance(registry[app_label], dict):
return registry[app_label]
# subset specified
if model not in registry[app_label]:
return None
return registry[app_label][model]
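# Example (illustrative), using nautobot's dcim.Device model (app_label "dcim", model "device"):
#   query_registry(Device, {"dcim": True})               -> True
#   query_registry(Device, {"dcim": {"device": False}})  -> False
#   query_registry(Device, {"ipam": True})               -> None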
# Registry of Content Types of models that should be under version control.
# Top-level dict keys are app_labels. If the top-level dict value is `True`,
# then all models under that app_label are allowlisted. The top-level value
# may also be a nested dict containing a subset of version-controlled models
# within the app_label.
__VERSIONED_MODEL_REGISTRY___ = {
"nautobot_version_control": {
# Pull Requests are not versioned
"pullrequest": False,
"pullrequestreviewcomments": False,
"pullrequestreviews": False,
"branchmeta": False,
"branch": False,
# todo: calling the following "versioned" is odd.
# their contents are parameterized by branch
# changes, but they are not under VCS.
"commit": True,
"commitancestor": True,
"conflicts": True,
"constraintviolations": True,
},
"dcim": True,
"circuits": True,
"ipam": True,
"virtualization": True,
"taggit": True,
"tenancy": True,
"extras": {
# TODO: what should be versioned from `extras`?
"computedfield": True,
"configcontext": True,
"configcontextschema": True,
"customfield": True,
"customfieldchoice": True,
"customlink": True,
"exporttemplate": True,
# "gitrepository": True,
"graphqlquery": True,
"imageattachment": True,
# "job": True,
# "jobresult": True,
"objectchange": True,
"relationship": True,
"relationshipassociation": True,
"secret": True,
"secretsgroup": True,
"status": True,
"tag": True,
"taggeditem": True,
"webhook": True,
},
}
def is_versioned_model(model):
"""
Determines whether a model's is under version control.
See __MODELS_UNDER_VERSION_CONTROL__ for more info.
"""
registry = __VERSIONED_MODEL_REGISTRY___
return bool(query_registry(model, registry))
def register_versioned_models(registry):
"""Register additional content types to be versioned.
Args:
registry: a python dict of content types that
will be placed under version control:
```
{
"my_app_label": True,
"my_other_model": {
"my_model": True,
},
}
```
"""
err = ValueError("invalid versioned model registry")
for key, val in registry.items():
if not isinstance(key, str):
# key must be string
raise err
if isinstance(val, bool):
# val may be bool
continue
if not isinstance(val, dict):
# val must be dict if not bool
raise err
# validate nested dict
for k, v in val.items():
if not isinstance(k, str):
# k must be string
raise err
if not isinstance(v, bool):
# v must be bool
raise err
__VERSIONED_MODEL_REGISTRY___.update(registry)
__DIFF_TABLE_REGISTRY__ = {}
def diff_table_for_model(model):
"""
Returns a table object for a model, if it exists in
the ` __DIFF_TABLE_REGISTRY__`.
"""
return query_registry(model, __DIFF_TABLE_REGISTRY__)
def register_diff_tables(registry):
"""Register additional tables to be used in diffs.
Registry values must be subclasses of django_tables2.Table.
Args:
registry: a python dict of content types that
will be placed under version control:
```
{
"my_app_label": True,
"my_other_model": {
"my_model": True,
},
}
```
"""
err = ValueError("invalid diff table registry")
for key, val in registry.items():
if not isinstance(key, str):
# key must be string
raise err
if not isinstance(val, dict):
# val must be dict
raise err
for k, v in val.items():
if not isinstance(k, str):
# k must be string
raise err
if not issubclass(v, django_tables2.tables.Table):
# v must be Table
raise err
__DIFF_TABLE_REGISTRY__.update(registry)
__GLOBAL_ROUTER_SWITCH__ = True
def is_global_router_enabled():
"""Returns true if the __GLOBAL_ROUTER_SWITCH__ is turned on"""
global __GLOBAL_ROUTER_SWITCH__ # pylint: disable=W0602
return __GLOBAL_ROUTER_SWITCH__
def switch_global_router_on(**kwargs):
"""Sets __GLOBAL_ROUTER_SWITCH to true"""
global __GLOBAL_ROUTER_SWITCH__
__GLOBAL_ROUTER_SWITCH__ = True
def switch_global_router_off(**kwargs):
"""Sets __GLOBAL_ROUTER_SWITCH to false"""
global __GLOBAL_ROUTER_SWITCH__
__GLOBAL_ROUTER_SWITCH__ = False
| StarcoderdataPython |
3398443 | <gh_stars>1-10
#! /usr/bin/python3.5
# -*- coding: utf-8
import numpy as np
import re
def read_data(file):
"""
    Given a .dat file, we read:
- n
- W
- D
"""
opened_file = open(file)
n = int(opened_file.readline().strip('\n'))
W = np.zeros((n, n))
D = np.zeros((n, n))
opened_file.readline()
for iterator in range(n):
# the separator is a space
line = opened_file.readline().strip('\n').split(' ')
# we put the reading line in the matrix
if '' in line:
line.remove('')
W[iterator, :] = line
opened_file.readline()
for iterator in range(n):
        # use a regex here because the separators on these lines are irregular
line = opened_file.readline()
D[iterator, :] = re.findall(r"[0-9]+", line)
return [n, W, D]
def fitness(W, D, sol):
"""
Fitness will compute the value of our solution.
We assume that:
- W is the flow matrix
- D is the distance matrix
- sol is a solution
    We assume that W, D are numpy matrices.
Sol is a list.
"""
res = 0
for i in range(len(sol)):
for j in range(i+1, len(sol)):
# we take the object i in sol and object j
            # we subtract 1 because we count from 0
res += W[i, j] * D[sol[i] - 1, sol[j] - 1]
return 2 * res
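# Example (illustrative), with W and D as numpy arrays: W = [[0, 3], [3, 0]], D = [[0, 5], [5, 0]], sol = [1, 2]
# gives res = W[0, 1] * D[0, 1] = 15, so fitness returns 2 * 15 = 30
# (the factor 2 accounts for the symmetric counterpart of each (i, j) pair)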
def delta(W, D, sol, i, j):
"""
    We compute the fitness delta between two solutions.
    We assume that:
    - W is the flow matrix
- D is the distance matrix
- sol is the solution
- i is the index of first moved object
- j is the index of second moved object
    We assume that W, D are numpy matrices.
Sol is a list.
"""
res = 0
# we reduce i,j because we count from 0
j -= 1
i -= 1
for k in range(len(sol)):
if k != i and k != j:
res += (W[j, k] - W[i, k]) * (D[sol[i] - 1, sol[k] - 1] - D[sol[j] - 1, sol[k] - 1])
return 2 * res
def generate_neighbor(W, D, sol):
"""
    We generate all neighbor solutions, computing each fitness incrementally from the current one.
We have:
-W : the flow matrix
-D : the distance matrix
-sol : a solution
"""
res = []
fitness_sol = fitness(W=W, D=D, sol=sol)
for i in range(len(sol)):
for j in range(i + 1, len(sol)):
neighbor = list(sol)
# we swap the i and j objects
neighbor[i], neighbor[j] = neighbor[j], neighbor[i]
# we compute the fitness
            # delta() expects the original solution and 1-based positions
            new_fitness_sol = fitness_sol + delta(W=W, D=D, sol=sol, i=i + 1, j=j + 1)
r = i
s = j
object_i = sol[r]
object_j = sol[s]
res.append([new_fitness_sol, neighbor, object_i, r, object_j, s])
return res
class ListeTabou:
"""
    It's a matrix representing the forbidden assignments.
It has a size = nxn.
"""
def __init__(self, listeTabouSize):
self.listeTabou = np.zeros((listeTabouSize, listeTabouSize))
def add(self, i, r, j, s, t, l):
# we decrease i,j because it's some object
# and the object are 1 to N not 0 to N-1
i -= 1
j -= 1
self.listeTabou[i, r] = t + l
        # forbid moving object j back to position s (mirrors permitted())
        self.listeTabou[j, s] = t + l
def permitted(self, i, j, r, s, t):
return self.listeTabou[i, r] <= t and self.listeTabou[j, s] <= t
def random_solution(W, D, n):
"""
We generate a solution of size n
"""
sol = np.random.permutation(n) + 1
fitness_sol = fitness(W=W, D=D, sol=sol)
return [fitness_sol, sol]
def choose_neighbor(W, D, sol, best, t, l, listeTabou):
"""
    We choose a neighbor according to the tabu list and the aspiration criterion
"""
neighbors = generate_neighbor(W=W, D=D, sol=sol)
neighbors.sort()
    for ele in neighbors:
        # avoid shadowing the fitness() function and the input sol
        [fit, candidate, i, r, j, s] = ele
        # if we have a new best element
        if ele[0] < best[0]:
            # we add this movement to the matrix
            listeTabou.add(i=i, r=r, j=j, s=s, t=t, l=l)
            best = [fit, candidate]
            return [best, best]
        else:
            if listeTabou.permitted(i=i - 1, j=j - 1, r=r, s=s, t=t):
                listeTabou.add(i=i, r=r, j=j, s=s, t=t, l=l)
                return [best, [fit, candidate]]
    return [best, [fitness(W, D, sol), sol]]
def tabouSearch(file, l, t_max):
"""
    It combines all the previous functions to build a tabu search
"""
# read file for n, W, D
[n, W, D] = read_data(file)
# generate solution
sol = random_solution(W=W, D=D, n=n)
# at begining the best is the sol
best = list(sol)
# we define t and l
t = 0
if l != 1:
l = round(l * n)
# the list tabou
listeTabou = ListeTabou(n)
acc = []
# loop
while t != t_max:
# we choose a solution
[best, sol] = choose_neighbor(W=W, D=D, sol=sol[1], best=best, t=t, l=l, listeTabou=listeTabou)
acc.append(sol[0])
t += 1
return [best, acc]
def runTenTimesDat(file, l, t_max):
"""
    We run the algorithm ten times on a given problem with
    given l and t_max values
"""
acc = []
best = []
for ele in range(10):
res = tabouSearch(file=file, l=l, t_max=t_max)
acc.append(res[1])
best.append(res[0])
print("For l= " + str(l) + "\tBest = " + str(min(best)[0]) + "\tmean = " + str(np.mean(acc)) + "\tstd. dev = " + str(np.sqrt(np.var(acc))))
def generate_data(n):
"""
We generate data for a QAP problem.
    Two symmetrical matrices
"""
# we create the matrix of the right size with random integers
    # np.random.random_integers is deprecated; np.random.randint's upper bound
    # is exclusive, so use 6 and 11 to keep the same value ranges
    D = np.random.randint(0, 6, size=(n, n))
    W = np.random.randint(0, 11, size=(n, n))
    # we create symmetrical matrices
D = np.tril(D) + np.tril(D, -1).T
W = np.tril(W) + np.tril(W, -1).T
# we define the diag to 0
np.fill_diagonal(D, 0)
np.fill_diagonal(W, 0)
# we write data in file
data = open(str(n) + ".dat", 'w+')
data.write(str(n) + "\n\n")
# we write the D matrix
for line in D:
string = ""
for ele in line:
string += str(ele) + " "
data.write(string + "\n")
data.write("\n")
# same thing for the W
for line in W:
string = ""
for ele in line:
string += str(ele) + " "
data.write(string + "\n")
if __name__ == '__main__':
runTenTimesDat(file="1.dat", l=1, t_max=5000)
runTenTimesDat(file="1.dat", l=0.5, t_max=5000)
runTenTimesDat(file="1.dat", l=0.9, t_max=5000)
generate_data(40)
generate_data(50)
generate_data(80)
generate_data(100)
| StarcoderdataPython |
2846 | <reponame>GuilhermeEsdras/Grafos<gh_stars>0
from Roteiro4.Roteiro4__funcoes import Grafo
class Grafos:
# Grafo da Paraíba
paraiba = Grafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
for aresta in ['J-C', 'C-E', 'C-E', 'C-P', 'C-P', 'C-M', 'C-T', 'M-T', 'T-Z']:
paraiba.adicionaAresta(aresta)
# --- #
# Grafo Completo
grafo_completo = Grafo(['J', 'C', 'E', 'P'])
for aresta in ['J-C', 'J-P', 'J-E', 'C-E', 'C-P', 'P-E']:
grafo_completo.adicionaAresta(aresta)
# --- #
# K3
k3 = Grafo(['A', 'B', 'C'])
for aresta in ['A-B', 'B-C', 'C-A']:
k3.adicionaAresta(aresta)
# --- #
| StarcoderdataPython |
1796156 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 17 12:38:15 2021
@author: Oscar
"""
# Golden-section search: an interval-reduction method (related to bisection) for finding a local max or min of a function on an interval
# Strategy in selecting the bounds of the interval:
# l0 = distance between estimate,
# l0 = l1+l2 ; l1/l0 = l2/l1
# define R = l2/l1 = l1/l0 (ratio of successive segment lengths)
# From substitution : 1 +R = 1/R -> R**2 + R - 1 = 0
# R = [sqrt(5)-1]/2 <- GOLDEN RATIO
# d = R(x_u - x_l)
#x1 = x_l + d ; x2 = x_u - d
import numpy as np
import math
import matplotlib.pyplot as plt
"""
Interval Selection
"""
# Parameters
xu = 20 #int(input("Please choose a upper bound: "))
xl = -20 #int(input("Please choose a lower bound: "))
N = 100 #int(input("Please choose max number of iterations: "))
# Golden Ratio
R = (math.sqrt(5) - 1)/2
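# Quick numerical check (added for illustration): R is the positive root of
# R**2 + R - 1 = 0, i.e. R = (sqrt(5) - 1)/2 ~ 0.618
assert abs(R**2 + R - 1) < 1e-12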
"""
Evaluation of the Function
"""
# Evaluated function
f = lambda x: 2*np.sin(x) - x**2/10
def GoldenSearchMax(xu, xl, f, N):
for i in range(0, N-1):
# Intermediate points
d = R*(xu - xl)
x1 = xl + d
x2 = xu - d
fx1, fx2 = f(x1), f(x2)
if fx1 > fx2 :
xl = x2
elif fx1 < fx2:
xu = x1
else:
#print("The local maxima is located at:", x1, fx1)
break
return x1, fx1
def GoldenSearchMin(xu, xl, f, N):
for i in range(0, N-1):
# Intermediate points
d = R*(xu - xl)
x1 = xl + d
x2 = xu - d
fx1, fx2 = f(x1), f(x2)
if fx1 < fx2 :
xl = x2
elif fx1 > fx2:
xu = x1
else:
#print("The local minima is located at:", x1, fx1)
break
return x1, fx1
# Arrays to store the numbers
Max = GoldenSearchMax(xu, xl, f, N)
Min = GoldenSearchMin(xu, xl, f, N)
print('The local max and min of the interval are:', Max, Min)
# Initializing Arrays
x_value = np.linspace(xl, xu, N-1)
y_value = np.zeros(N-1)
# Populating y_array
for k in range(N-1):
y_value[k] = f(x_value[k])
# Plotting the function f
plt.plot(x_value ,y_value)
plt.scatter(Max[0], Max[1], label = 'Maxima', color = 'r')
plt.scatter(Min[0], Min[1], label = 'Minima', color = 'g')
plt.legend(['Function', 'Maxima', 'Minima'])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
| StarcoderdataPython |
3368373 | <reponame>mdomarsaleem/Facial_Plan<filename>Database creation/database_cv.py
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import scipy.misc
import cPickle
import os
import time
os.chdir("//home//pi/Desktop//Image_db/")
import warnings
warnings.filterwarnings('error', category=DeprecationWarning)
def rgb2gray(rgb):
return np.dot(rgb[:,:,:], [0.299, 0.587, 0.114])
def standard(X):
return (X - X.mean())/X.max()
def Pre_Process(face):
from skimage.transform import resize
X = standard(resize(face,(96,96))).reshape(-1,1,96,96)
X_normal = X.reshape(-1,9216)
return X,X_normal
# load it again
with open('/home/pi/Desktop/files/linear_model.pkl', 'rb') as fid:
Net = cPickle.load(fid)
map = np.load('/home/pi/Desktop/files/map.npy')
#print map
#face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/opencv-2.4.13/data/haarcascades_GPU/haarcascade_frontalface_default.xml')
#face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/opencv-2.4.13/data/haarcascades/haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/opencv-2.4.13/data/lbpcascades/lbpcascade_frontalface.xml')
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (1000, 750)
camera.framerate = 15
camera.zoom = (0,0,0.75,0.75)
rawCapture = PiRGBArray(camera, size=(1000, 750))
cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
cv2.resizeWindow('Video',640,480)
i = 0
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
frame = frame.array
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
start_time = time.time()
faces = face_cascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(90, 90)
)
#print("--- %s seconds ---" % (time.time() - start_time))
# Draw a rectangle around the faces
if len(faces)>0:
for (x, y, w, h) in faces:
i +=1
            fac = np.array(frame)[y:(y+h), x:(x+w), :]
            fac_gray = np.array(gray)[y:(y+h), x:(x+w)]
X,X_normal = Pre_Process(fac_gray)
Probability = Net.predict_proba(X.reshape(-1,9216))
prob = np.amax(Probability)
#print Probability
index = np.argmax(Probability)
#print index
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
#cv2.putText(frame,'omar',(x,y+h), cv2.FONT_HERSHEY_DUPLEX,1,(0,0,255), 2,8)
#cv2.putText(frame,str(map[index])+' '+str(round(prob*100,2) )+'%',(x,y), cv2.FONT_HERSHEY_DUPLEX,1,(255,255,255), 1,2)
print("--- %s seconds ---" % (time.time() - start_time))
scipy.misc.toimage(cv2.cvtColor(fac, cv2.COLOR_RGB2BGR)).save(time.strftime('%Y-%m-%d')+'_'+str(i) +'.jpg')
# Display the resulting frame
cv2.imshow('Video', frame)
#time.sleep(0.1)
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if time.localtime(time.time()).tm_hour == 20:
break
#os.system("shutdown now -h")
# When everything is done, release the capture
cv2.destroyAllWindows()
| StarcoderdataPython |
1665426 | # import the necessary packages
from Automation.Web import wikipedia, movie_review, play_song, recommendation, open_website
from Automation.OS.Windows import open_installed_apps, create_and_write_file
from Senses import listen, speak
import os
import platform
import subprocess
def assist(command):
"""if statements for executing commands"""
if "information" in command:
speak.tokioResponse("information about what?")
topic = listen.myCommand()
try:
bot = wikipedia.info()
bot.get_info(topic)
except Exception as e:
print(e)
elif "review" in command:
speak.tokioResponse("which movie?")
movie_name = listen.myCommand()
try:
bot = movie_review.Movie()
bot.movie_review(movie_name)
except Exception as e:
print(e)
elif "play song" in command:
speak.tokioResponse("which song?")
song_name = listen.myCommand()
try:
bot = play_song.Music()
bot.fromytvideo(song_name)
except Exception as e:
print(e)
elif "recommend" in command:
try:
bot = recommendation.IMDBlatestBestMovies()
bot.recommend()
except Exception as e:
print(e)
elif "open website" in command:
speak.tokioResponse("which website?")
website_name = listen.myCommand()
try:
bot = open_website.Website()
bot.open_website(website_name)
except Exception as e:
print(e)
else:
# Check for OS
if platform.system().lower() == 'windows':
if "open" in command:
speak.tokioResponse("which app?")
app_name = listen.myCommand()
try:
bot = open_installed_apps.Apps()
bot.openApps(app_name)
speak.tokioResponse("opened " + app_name + "sir")
except Exception as e:
print(e)
elif "create new" in command:
speak.tokioResponse("What should I make note of")
list_of_commands = []
                note = listen.myCommand()
                while note != 'stop please':
                    list_of_commands.append(note + '\n')
                    note = listen.myCommand()
try:
bot = create_and_write_file.CreateNewFile()
bot.takenote(list_of_commands)
speak.tokioResponse("Note taken sir!!")
except Exception as e:
print(e)
| StarcoderdataPython |
3367195 | <filename>layers.py
import tensorflow as tf
def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,
padding='SAME', groups=1, weights=None, biases=None):
"""Create a convolution layer.
Adapted from: https://github.com/ethereon/caffe-tensorflow
"""
# Get number of input channels
input_channels = int(x.get_shape()[-1])
# Create lambda function for the convolution
convolve = lambda i, k: tf.nn.conv2d(i, k,
strides=[1, stride_y, stride_x, 1],
padding=padding)
with tf.variable_scope(name) as scope:
if weights is None:
# Create tf variables for the weights and biases of the conv layer
weights = tf.get_variable('weights', shape=[filter_height,
filter_width,
input_channels / groups,
num_filters])
if biases is None:
biases = tf.get_variable('biases', shape=[num_filters])
if groups == 1:
conv = convolve(x, weights)
        # In the case of multiple groups, split the inputs & weights and convolve them separately
else:
# Split input and weights and convolve them separately
input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=weights)
output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
# Concat the convolved output together again
conv = tf.concat(axis=3, values=output_groups)
# Add biases
bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))
# Apply relu function
relu = tf.nn.relu(bias, name=scope.name)
return relu
def fc(x, num_in, num_out, name, relu=True, weights=None, biases=None):
"""Create a fully connected layer."""
with tf.variable_scope(name) as scope:
# Create tf variables for the weights and biases
if weights is None:
weights = tf.get_variable('weights', shape=[num_in, num_out])
if biases is None:
biases = tf.get_variable('biases', [num_out])
# Matrix multiply weights and inputs and add bias
act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
if relu:
# Apply ReLu non linearity
relu = tf.nn.relu(act)
return relu
else:
return act
def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,
padding='SAME'):
"""Create a max pooling layer."""
return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],
strides=[1, stride_y, stride_x, 1],
padding=padding, name=name)
def lrn(x, radius, alpha, beta, name, bias=1.0):
"""Create a local response normalization layer."""
return tf.nn.local_response_normalization(x, depth_radius=radius,
alpha=alpha, beta=beta,
bias=bias, name=name)
def dropout(x, keep_prob):
"""Create a dropout layer."""
return tf.nn.dropout(x, keep_prob)
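# Minimal usage sketch (assumes TensorFlow 1.x graph mode; the layer sizes
# below are illustrative AlexNet-like placeholders, not taken from this file).
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, [None, 227, 227, 3])
    keep_prob = tf.placeholder(tf.float32)
    conv1 = conv(x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
    norm1 = lrn(conv1, 2, 1e-04, 0.75, name='norm1')
    pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')
    flattened = tf.reshape(pool1, [-1, 27 * 27 * 96])
    fc6 = fc(flattened, 27 * 27 * 96, 4096, name='fc6')
    drop6 = dropout(fc6, keep_prob)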
| StarcoderdataPython |
1701345 | """Provides multi-point element-wise operations such as ``contains``."""
from shapely import speedups
from ._vectorized import (contains, touches)
| StarcoderdataPython |
3238774 | <filename>fastsc/coloring/color_opt.py
from fastsc.util import get_map_circuit, get_layer_circuits
import networkx as nx
import numpy as np
from fastsc.models import IR, Qbit, Inst
from .util import relabel_coloring, get_qubits, decompose_layer, decompose_layer_flexible, reschedule_layer, limit_colors, get_max_time
from fastsc.util import get_connectivity_graph, get_aug_line_graph
from fastsc.models import Sycamore_device
import z3
import sys
def smt_find(smt_dict, omega_lo, omega_hi, num_color, alpha, verbose, threshold = None):
    # convert omega and alpha to MHz, so that they are int
omega_lo = int(omega_lo * 1000)
omega_hi = int(omega_hi * 1000)
alpha = int(alpha * 1000)
if (omega_lo, omega_hi, num_color, alpha) in smt_dict:
if verbose == 0:
print("Found existed entry.")
return smt_dict[(omega_lo, omega_hi, num_color, alpha)]
if threshold != None:
threshold = int(threshold * 1000)
else:
thr_lo = -alpha // 10
thr_hi = (omega_hi-omega_lo)//num_color
max_iter = 10
it = 0
thr = thr_lo
if verbose == 0:
print("Start: ", thr_lo, thr_hi, num_color)
while it <= max_iter and thr_hi - thr_lo > 1:
if verbose == 0:
print("iter", it)
thr = thr_lo + (thr_hi - thr_lo) // 2
c = [z3.Int('c%d' % i) for i in range(num_color)]
s = z3.Solver()
for i in range(num_color):
s.add(c[i] > omega_lo, c[i] < omega_hi)
for i in range(num_color):
for j in range(num_color):
if i != j:
s.add((c[i]-c[j])**2 > thr**2, (c[i]-c[j]+alpha)**2 > thr**2)
if s.check() == z3.sat:
thr_lo = thr + 1
else:
thr_hi = thr - 1
it += 1
threshold = thr
if verbose == 0:
print("Threshold: ", threshold)
c = [z3.Int('c%d' % i) for i in range(num_color)]
s = z3.Solver()
for i in range(num_color):
s.add(c[i] > omega_lo, c[i] < omega_hi)
for i in range(num_color):
for j in range(num_color):
if i != j:
s.add((c[i]-c[j])**2 > threshold**2, (c[i]-c[j]+alpha)**2 > threshold**2)
if s.check() == z3.sat:
m = s.model()
result = (True, [float(m.evaluate(c[i]).as_long())/1000.0 for i in range(num_color)])
else:
result = (False, [])
if verbose == 0:
print(result)
smt_dict[(omega_lo, omega_hi, num_color, alpha)] = result
return result
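# Illustrative, uncalled helper showing the intended call pattern of smt_find().
# The bounds and anharmonicity below are placeholder values, not taken from any
# device description in this module.
def _example_smt_find_usage():
    cache = dict()
    sat, freqs = smt_find(cache, 6.0, 7.0, 3, -0.3, verbose=1)
    # sat is a bool; when sat is True, freqs holds 3 well-separated frequencies.
    return sat, freqs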
def color_opt(device, circuit, scheduler, d, decomp, lim_colors, verbose):
freqsdata = []
gatesdata = []
width = device.side_length
height = device.side_length
num_q = device.qubits
omega_max = device.omega_max
omega_min = device.omega_min
delta_int = device.delta_int
delta_ext= device.delta_ext
delta_park = device.delta_park
ALPHA = device.alpha
G_connect = device.g_connect
#G_connect = get_connectivity_graph(width*height, 'grid')
park_coloring = nx.coloring.greedy_color(G_connect)
num_park = len(set(park_coloring.values()))
G_crosstalk = device.g_xtalk
#G_crosstalk = get_aug_line_graph(width, height, d)
coupling = device.coupling
Cqq = device.cqq
q_arr = get_qubits(circuit)
smt_dict = dict()
def _build_park_color_map():
# negative colors for parking, non-negative colors for interaction.
colormap = dict()
(sat, omegas) = smt_find(smt_dict, omega_min, omega_min + delta_park, num_park, ALPHA, verbose, None)
if not sat:
step_park = delta_park / num_park
            omegas = [omega_max - delta_int - delta_ext - c * step_park for c in range(num_park)]
if verbose == 0:
print("Warning: SMT not satisfied for idle freq.")
for c in range(num_park):
colormap[str(-(c+1))] = omegas[c]
return colormap
def _add_int_color_map(colormap, n_int):
if decomp == 'cphase' or decomp == 'flexible':
if n_int > 5:
step_int = (delta_int + ALPHA) / n_int
omegas = [omega_max + ALPHA - c * step_int for c in range(n_int)]
else:
(sat, omegas) = smt_find(smt_dict, omega_max-delta_int, omega_max + ALPHA, n_int, ALPHA, verbose, None)
if not sat:
step_int = (delta_int + ALPHA) / n_int
omegas = [omega_max + ALPHA - c * step_int for c in range(n_int)]
if verbose == 0:
print("Warning: SMT not satisfied for int freq.")
for c in range(n_int):
colormap[str(c)] = omegas[c]
else:
if n_int > 5:
step_int = delta_int / n_int
omegas = [omega_max - c * step_int for c in range(n_int)]
else:
(sat, omegas) = smt_find(smt_dict, omega_max-delta_int, omega_max, n_int, ALPHA, verbose, None)
if not sat:
step_int = delta_int / n_int
omegas = [omega_max - c * step_int for c in range(n_int)]
if verbose == 0:
print("Warning: SMT not satisfied for int freq.")
for c in range(n_int):
colormap[str(c)] = omegas[c]
color_to_freq = _build_park_color_map()
def _park_freq(c):
omg = color_to_freq[str(-(c+1))]
return omg #+ get_flux_noise(device, omg, sigma)
def _initial_frequency():
freqs = dict()
for q in range(num_q):
freqs[q] = _park_freq(park_coloring[q])
return freqs
circ_mapped = get_map_circuit(circuit, coupling)
#circuit.draw(output='mpl')
t_act = np.zeros(num_q)
t_2q = np.zeros(num_q)
active_list = [False for i in range(num_q)]
success_rate = 1.0
tot_success = 0.0
tot_cnt = 0
max_colors = 0 # max number of colors used
worst_success = 1.0
park_freqs = _initial_frequency()
alphas = [ALPHA for f in park_freqs]
for i in range(num_q):
q_arr[i].idle_freq = [park_freqs[i], park_freqs[i]+alphas[i]]
ir = IR(qubits = q_arr, width = num_q, coupling = coupling, alpha = ALPHA)
# Check scheduler
if (scheduler == 'hybrid'):
print("Hybrid scheduler to be implemented.")
sys.exit(2)
else:
layers = get_layer_circuits(circ_mapped)
num_layers = len(layers)
idx = 0
total_time = 0.0 # ns
total_tcz = 0.0
if verbose == 0:
print("Num of layers:", num_layers)
#while (idx < num_layers or len(leftover) > 0):
for idx in range(num_layers):
# all_gates = []
layer_circuit = layers[idx]
if verbose == 0:
print(layer_circuit)
print(idx, "-----------------")
if decomp == 'flexible':
if verbose == 0:
print("Decompose layer", idx)
decomp_layer = decompose_layer_flexible(layer_circuit, G_crosstalk, verbose)
else:
decomp_layer = decompose_layer(layer_circuit, decomp)
if (scheduler == 'greedy'):
resched_layer = reschedule_layer(decomp_layer, coupling, verbose)
else:
resched_layer = decomp_layer
if lim_colors > 0:
resched_layer = limit_colors(resched_layer, lim_colors, G_crosstalk, verbose)
for layer in resched_layer:
print(layer.qasm())
insts = []
# Pre-fill edges for constructing (undirected) xtalk graph
#edges = [leftover[i//2] if i%2==0 else (leftover[i//2][1], leftover[i//2][0]) for i in range(2*len(leftover))]
edges = []
#edges_cphase = [] # if (q1,q2) is in it, then (q2,q1) is also in it
#edges_iswaps = [] # if (q1,q2) is in it, then (q2,q1) is also in it
#curr_gates = [e for e in left_gates]
#leftover = []
#left_gates = []
taus = {} # For storing coupling times
gt = 0.0
layer_time = 0.0
barrier = False
for _, qargs, _ in layer.data:
if len(qargs) == 2:
q1, q2 = qargs[0].index, qargs[1].index
edge = (q1, q2)
edges.append(edge)
edges.append((q2,q1)) # because undirected graph
#print("+Edges:")
#print(edges)
if (len(edges) > 0):
#idx += 1
#continue
subgraph = nx.subgraph(G_crosstalk, edges)
#print(G_crosstalk.nodes())
#print(subgraph.nodes())
int_coloring = nx.coloring.greedy_color(subgraph)
#print("+int_coloring:")
#print(int_coloring)
num_int = len(set(int_coloring.values()))
while lim_colors > 0 and num_int > lim_colors:
# need to recolor the layer, cannot use greedy_color because it is not seeded
# int_coloring = nx.coloring.greedy_color(subgraph,strategy='random_sequential')
int_coloring = {}
nodes = list(subgraph)
np.random.shuffle(nodes)
for u in nodes:
# Set to keep track of colors of neighbours
neighbour_colors = {int_coloring[v] for v in subgraph[u] if v in int_coloring}
# Find the first unused color.
temp_color = 0
while temp_color in neighbour_colors:
temp_color += 1
# Assign the new color to the current node.
int_coloring[u] = temp_color
num_int = len(set(int_coloring.values()))
if verbose == 0:
print("num_int: ", num_int)
int_coloring = relabel_coloring(int_coloring)
if num_int > max_colors: max_colors = num_int
if num_int == 0:
idx += 1
continue
_add_int_color_map(color_to_freq, num_int)
def _int_freq(c):
omg = color_to_freq[str(c)]#+np.random.normal(0,sigma)
return omg #+ get_flux_noise(device, omg, sigma)
#print(layer)
#print("-----------------")
# Refill edges and curr_gates
#edges = [e for e in leftover]
# edges = []
#curr_gates = [e for e in left_gates]
# curr_gates = []
# single_qb_err = 0.0015
# single_qb_err_acc = 1.0
for g, qargs, cargs in layer.data:
if g.name == "barrier": barrier = True
if g.name == "measure": continue
#print(qargs)
#print(qargs[0].index)
if len(qargs) == 1: # single qubit gates
# all_gates.append((g.qasm(),(qargs[0].index, -1)))
active_list[qargs[0].index] = True
gt = device.gate_times[g.name]
if gt > layer_time: layer_time = gt
insts.append(Inst(g,qargs,cargs, None, gt))
# single_qb_err_acc *= 1 - single_qb_err
elif len(qargs) == 2:
q1, q2 = qargs[0].index, qargs[1].index
active_list[q1] = True
active_list[q2] = True
#edges.append((q1, q2))
#curr_gates.append((g.qasm(),(q1, q2)))
try:
f = _int_freq(int_coloring[(q1, q2)])
except:
f = _int_freq(int_coloring[(q2, q1)])
if (g.name == 'unitary' and g.label == 'iswap'):
f1 = f
f2 = f
taus[(q1,q2)] = np.pi / (2 * 0.5 * np.sqrt(f*f) * Cqq)
elif (g.name == 'unitary' and g.label == 'sqrtiswap'):
f1 = f
f2 = f
taus[(q1,q2)] = 0.5 * np.pi / (2 * 0.5 * np.sqrt(f*f) * Cqq)
elif (g.name == 'cz' or g.label == 'cz'):
f1 = f
f2 = f - alphas[q2] # b/c match f1 with f2+alpha
taus[(q1,q2)] = np.pi / (np.sqrt(2) * 0.5 * np.sqrt(f*f) * Cqq) # f is interaction freq
else:
print("Gate %s(%s) not recognized. Supports iswap, sqrtiswap, cz." % (g.name, g.label))
t_2q[q1] += taus[(q1,q2)]
t_2q[q2] += taus[(q1,q2)]
insts.append(Inst(g, qargs, cargs, [f1, f2], taus[(q1,q2)]))
# success_rate *= single_qb_err_acc
#if (scheduler == 'greedy'):
# edges, leftover, ind = greedy_reschedule(coupling, edges)
# for i in range(len(ind)):
# if (ind[i]):
# all_gates.append(curr_gates[i])
# else:
# left_gates.append(curr_gates[i])
#else:
# for i in range(len(curr_gates)):
# all_gates.append(curr_gates[i])
#for i in range(len(curr_gates)):
# all_gates.append(curr_gates[i])
if not barrier:
ir.append_layer_from_insts(insts)
qb_freqs = [q_omega[0] for q_omega in ir.data[-1][1]]
print(qb_freqs)
gt = get_max_time(gt, taus)
tot_cnt += 1
if gt > layer_time: layer_time = gt
total_time += layer_time
for qubit in range(num_q):
if active_list[qubit]:
t_act[qubit] += layer_time
idx += 1
ir.t_act = t_act
ir.t_2q = t_2q
ir.depth_before = idx
ir.depth_after = tot_cnt
ir.total_time = total_time
ir.max_colors = max_colors
return ir
| StarcoderdataPython |
137897 | <reponame>djevans071/Reba<gh_stars>0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 22:15:40 2017
@author: psamtik071
"""
#functions for feature-engineering
import pandas as pd
# make sure tot_docks > 0 (especially when calculating bikes available)
def bulk_query(year):
# query all bikes after or before a certain time
if year == 2015:
date_string = "< '2016-03-01'"
if year == 2016:
date_string = ">= '2016-03-01'"
query = """
SELECT a.id, a.date, a.hour, bikes_out, bikes_in, dayofweek, month,
is_weekday, is_holiday, rebal_net_flux, tot_docks, avail_bikes,
avail_docks, precip, snow, temp, long, lat
FROM features a
LEFT JOIN weather b ON a.date = b.date AND a.hour = b.hour
LEFT JOIN stations c on a.id=c.id
WHERE a.date {} AND tot_docks > 0
ORDER BY a.id, a.date, a.hour;
""".format(date_string)
return query
def strip_unused_stations(df, station_list):
return df[df.id.isin(station_list)]
def make_categorical(df, cols):
for col in cols:
df[col] = df[col].astype('category')
return df
def flux_conditions(x, threshold = 0.2):
# for x in pct_flux, set the following parameters:
    # keep the flux value when it exceeds the threshold in magnitude (a
    # rebalance is warranted: positive means remove bikes, negative means add
    # bikes); otherwise return 0 (no rebalance needed)
if x > abs(threshold):
return x
elif x < -abs(threshold):
return x
else:
return 0
def temp_conditions(x):
# temperature categories
if x > 80.:
return 80 #hot
elif (x > 60.) & (x <= 80.):
return 70 #mild
elif (x > 40.) & (x <= 60.):
return 50 #chilly
else:
return 30 #cold
def precip_conditions(x):
# precipitation categories
if x > 0.10:
return 1
else:
return 0
def merge_by_date(df1, df2):
return pd.merge(df1, df2, how = 'left', on = 'date')
# create a daily avg flux column and shift it to get yesterday's flux for a given date.
# also with weekly fluxes
def make_lagged_fluxes(df):
mean_daily_flux = df.groupby('date').mean().flux
mean_yesterday_flux = mean_daily_flux.shift(1).reset_index()
mean_lastweek_flux = mean_daily_flux.shift(7).reset_index()
mean_daily_flux = mean_daily_flux.reset_index().rename(columns = {'flux': 'mean_flux'})
mean_yesterday_flux = mean_yesterday_flux.rename(columns = {'flux': 'yest_flux'})
mean_lastweek_flux = mean_lastweek_flux.rename(columns = {'flux': 'last_week_flux'})
dfs = [df, mean_daily_flux, mean_yesterday_flux, mean_lastweek_flux]
return reduce(merge_by_date, dfs)
def new_features(df):
df['date'] = pd.to_datetime(df.date)
df['hour'] = df['hour'].astype(int)
# turn strings 'True' and 'False' into 1 and 0
string_dict = {'True': 1, 'False':0}
df[['is_weekday', 'is_holiday']] = df[['is_weekday', 'is_holiday']].replace(string_dict)
# fix the number of total docks for a given day
total_docks = df.groupby(['date']).max().tot_docks.reset_index()
df = pd.merge(df, total_docks, how = 'left', on = 'date').rename(columns = {'tot_docks_y': 'tot_docks'})
df.drop('tot_docks_x', 1, inplace=True)
# engineer new features
df['flux'] = df.bikes_in - df.bikes_out
df['pct_bikes_in'] = df.bikes_in / df.tot_docks
df['pct_bikes_out'] = df.bikes_out / df.tot_docks
df['pct_avail_bikes'] = df.avail_bikes / df.tot_docks
df['pct_avail_docks'] = df.avail_docks / df.tot_docks
df['pct_flux'] = df.flux / df.tot_docks
#df['pct_rebal_flux'] = df.rebal_net_flux / df.tot_docks
#normalize precipitation
df['precip'] = df.precip / df.precip.max()
# get lagged features
df_with_lags = make_lagged_fluxes(df).dropna()
# hist_cols = ['mean_flux', 'yest_flux', 'last_week_flux']
# for col in hist_cols:
# df_with_lags[col] = df_with_lags[col].apply(flux_conditions).astype('category')
# df_with_lags = df_with_lags.dropna()
# features_to_clear = ['bikes_out', 'bikes_in','rebal_net_flux',
# 'tot_docks', 'avail_bikes', 'avail_docks', 'flux']
return df_with_lags
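# Illustrative, uncalled sketch of how these helpers chain together. The SQL
# engine/connection and the station list are assumptions; they are not defined
# in this module.
def example_pipeline(engine, station_list, year=2016):
    raw = pd.read_sql(bulk_query(year), engine)
    raw = strip_unused_stations(raw, station_list)
    return new_features(raw)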
| StarcoderdataPython |
1614906 | <filename>tests/regression/lib/constants.py
TEST_COTROL_FILE = "test.cntl"
| StarcoderdataPython |
3346739 | <filename>alesisvsysex/ui/window.py<gh_stars>1-10
from PyQt5.QtWidgets import *
from alesisvsysex.protocol.model import AlesisV
from alesisvsysex.device.alesis import AlesisV25Device
from alesisvsysex.device.file import FileDevice
from alesisvsysex.ui.components import *
from alesisvsysex.ui.filedialog import *
__all__ = ['AlesisVSysexApplication']
class ActionMenuWidget (QWidget):
def __init__(self, parent):
super().__init__(parent)
self.initLayout()
def initLayout(self):
layout = QHBoxLayout()
bsavef = QPushButton('Save To File', self)
bsavef.clicked.connect(self.propagateCommand('saveFile'))
layout.addWidget(bsavef)
bloadf = QPushButton('Load From File', self)
bloadf.clicked.connect(self.propagateCommand('loadFile'))
layout.addWidget(bloadf)
bsaved = QPushButton('Save To Device', self)
bsaved.clicked.connect(self.propagateCommand('saveDevice'))
layout.addWidget(bsaved)
bloadd = QPushButton('Load From Device', self)
bloadd.clicked.connect(self.propagateCommand('loadDevice'))
layout.addWidget(bloadd)
self.setLayout(layout)
self.setFixedHeight(50)
def propagateCommand(self, command):
def closure():
getattr(self.parent().parent(), command)()
return closure
class ContainerWidget (QWidget):
def __init__(self):
super().__init__()
def getModel(self):
p = self.parent()
while not isinstance(p, EditorWidget):
p = p.parent()
return p.getModel()
class EditorWidget (QTabWidget):
def __init__(self, parent):
super().__init__(parent)
self.children = []
self.initLayout()
def addChild(self, parent, widget):
parent.addWidget(widget)
self.children.append(widget)
def initLayout(self):
pane1l = QHBoxLayout()
self.addChild(pane1l, BasicWidget(self, "Keys", 'keys'))
self.addChild(pane1l, BasicWidget(self, "Pitch Wheel", 'pwheel'))
self.addChild(pane1l, BasicWidget(self, "Mod Wheel", 'mwheel'))
self.addChild(pane1l, BasicWidget(self, "Sustain", 'sustain'))
pane1 = ContainerWidget()
pane1.setLayout(pane1l)
pane2l = QVBoxLayout()
self.addChild(pane2l, CompoundWidget(self, "Knobs", 'knobs'))
# self.addChild(pane2l, CompoundWidget(self, "Buttons", 'buttons'))
pane2 = ContainerWidget()
pane2.setLayout(pane2l)
pane3l = QVBoxLayout()
self.addChild(pane3l,CompoundWidget(self, "Pads", 'pads'))
pane3 = ContainerWidget()
pane3.setLayout(pane3l)
self.addTab(pane1, "Keys / Wheels / Sustain")
self.addTab(pane2, "Knobs")
self.addTab(pane3, "Pads")
def getModel(self):
return self.parentWidget().parentWidget().model
def updateState(self):
for c in self.children:
c.updateState()
class MainWidget (QWidget):
def __init__(self, parent):
super().__init__(parent)
self.initLayout()
def initLayout(self):
layout = QVBoxLayout()
self.actionWidget = ActionMenuWidget(self)
layout.addWidget(self.actionWidget)
self.editorWidget = EditorWidget(self)
layout.addWidget(self.editorWidget)
self.setLayout(layout)
def updateState(self):
self.editorWidget.updateState()
class AlesisVSysexApplication (QMainWindow):
def __init__(self):
super().__init__()
self.model = AlesisV()
self.device = AlesisV25Device()
self.initWindow()
def initWindow(self):
self.setWindowTitle('Alesis V-Series SysEx Editor')
self.initWidget()
self.statusBar().showMessage('Ready.')
self.show()
def initWidget(self):
self.widget = MainWidget(self)
self.setCentralWidget(self.widget)
def saveFile(self):
launchSaveFileDialog(self)
def saveFileCallback(self, name):
f = FileDevice(name)
f.set_config(self.model)
self.statusBar().showMessage("Saved configuration to '%s'." % name)
def loadFile(self):
launchLoadFileDialog(self)
def loadFileCallback(self, name):
f = FileDevice(name)
self.model = f.get_config()
self.widget.updateState()
self.statusBar().showMessage("Loaded configuration from '%s'." % name)
def saveDevice(self):
self.device.set_config(self.model)
self.statusBar().showMessage("Saved configuration to MIDI device.")
def loadDevice(self):
self.model = self.device.get_config()
self.widget.updateState()
self.statusBar().showMessage("Loaded configuration from MIDI device.")
| StarcoderdataPython |
22170 | from math import log2
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from x_transformers import Encoder, Decoder
# helpers
def exists(val):
return val is not None
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
# classes
class DiscreteVAE(nn.Module):
def __init__(
self,
num_tokens,
dim = 512,
hidden_dim = 64
):
super().__init__()
hdim = hidden_dim
self.encoder = nn.Sequential(
nn.Conv2d(3, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, num_tokens, 1)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(dim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.ConvTranspose2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.ConvTranspose2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, 3, 1)
)
self.num_tokens = num_tokens
self.codebook = nn.Embedding(num_tokens, dim)
def forward(
self,
img,
return_recon_loss = False,
return_logits = False
):
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
soft_one_hot = F.gumbel_softmax(logits, tau = 1.)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_recon_loss:
return out
loss = F.mse_loss(img, out)
return loss
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim = 512,
num_text_tokens = 10000,
num_visual_tokens = 512,
text_enc_depth = 6,
visual_enc_depth = 6,
text_seq_len = 256,
visual_seq_len = 1024,
text_heads = 8,
visual_heads = 8
):
super().__init__()
self.scale = dim ** -0.5
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.visual_emb = nn.Embedding(num_visual_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len, dim)
self.visual_pos_emb = nn.Embedding(visual_seq_len, dim)
self.text_transformer = Encoder(dim = dim, depth = text_enc_depth, heads = text_heads)
self.visual_transformer = Encoder(dim = dim, depth = visual_enc_depth, heads = visual_heads)
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device = text.shape[0], text.device
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_emb = self.visual_emb(image)
image_emb += self.visual_pos_emb(torch.arange(image.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
sim = einsum('i d, j d -> i j', text_latents, image_latents) * self.scale
if not return_loss:
return sim
labels = torch.arange(b, device = device)
loss = F.cross_entropy(sim, labels)
return loss
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
num_text_tokens = 10000,
num_image_tokens = 512,
text_seq_len = 256,
image_seq_len = 1024,
depth = 6, # should be 64
heads = 8,
vae = None
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len, dim)
self.image_pos_emb = nn.Embedding(image_seq_len, dim)
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.image_seq_len = image_seq_len
self.total_tokens = num_text_tokens + num_image_tokens + 1 # extra for EOS
self.vae = vae
self.image_emb = vae.codebook
self.transformer = Decoder(dim = dim, depth = depth, heads = heads)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
def forward(
self,
text,
image,
mask = None,
return_loss = False
):
device = text.device
is_raw_image = len(image.shape) == 4
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
if is_raw_image:
assert exists(self.vae), 'VAE must be passed into constructor if you are to train directly on raw images'
image_logits = self.vae(image, return_logits = True)
codebook_indices = image_logits.argmax(dim = 1).flatten(1)
image = codebook_indices
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(torch.arange(image.shape[1], device = device))
tokens = torch.cat((text_emb, image_emb), dim = 1)
if exists(mask):
mask = F.pad(mask, (0, self.image_seq_len), value = True)
out = self.transformer(tokens, mask = mask)
out = self.to_logits(out)
if not return_loss:
return out
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text, offsetted_image), dim = 1)
labels = F.pad(labels, (0, 1), value = (self.total_tokens - 1)) # last token predicts EOS
loss = F.cross_entropy(out.transpose(1, 2), labels[:, 1:])
return loss
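# Minimal smoke-test sketch. The sizes below are small placeholder values
# chosen so the shapes line up with DiscreteVAE's 8x downsampling (a 32x32
# image gives 4*4 = 16 codebook tokens); they are not the original settings.
if __name__ == '__main__':
    vae = DiscreteVAE(num_tokens = 512, dim = 256, hidden_dim = 32)
    dalle = DALLE(dim = 256, num_text_tokens = 10000, num_image_tokens = 512,
                  text_seq_len = 64, image_seq_len = 16, depth = 2, heads = 4, vae = vae)
    text = torch.randint(0, 10000, (2, 64))
    images = torch.randn(2, 3, 32, 32)
    loss = dalle(text, images, return_loss = True)
    loss.backward()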
| StarcoderdataPython |
1604941 | <reponame>kingsznhone/Project-Ragnarok<gh_stars>1-10
import asyncio
import pathlib
import ssl
import websockets
import json
import threading
from threading import Thread
async def rx (websocket):
while True:
greeting = await websocket.recv()
print (greeting)
async def startclient():
uri = "wss://localhost:8765"
async with websockets.connect(uri, ssl=ssl_context) as websocket:
await websocket.send("963")
await websocket.send("Kings")
while True:
greeting = await websocket.recv()
greeting = greeting.encode('utf-8').decode('unicode_escape')
print(f"< {greeting}")
#buffer = json.dumps({"msgType":"PrivateMessage",
# "TgtUser":"Queens",
# "text":input()})
buffer = json.dumps({"msgType":"Broadcast","text":input()})
await websocket.send(buffer)
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.load_verify_locations("self.crt")
asyncio.get_event_loop().run_until_complete(startclient())
| StarcoderdataPython |
3406 | <gh_stars>1-10
#!/usr/bin/python
import yaml
import os
import ast
import sys
from collections import OrderedDict
curr_dir = os.getcwd()
work_dir = sys.argv[1]
network_type = sys.argv[2]
testplan_dict = {}
testplan_dict["name"] = "System performance test"
testplan_dict["description"] = "This test is to create as much chaincode computation load as possible"
testplan_dict["runid"] = "RUNID_HERE"
if network_type == "ibp":
testplan_dict["networkid"] = sys.argv[3]
testplan_dict["collectFabricMetrics"] = False
testplan_dict["storageclass"] = "default"
testplan_dict["saveLog"] = False
testplan_dict["continueAfterFail"] = True
testplan_dict["tests"] = []
testplan_dict["peernodeAlias"] =[]
if os.path.exists(work_dir) != True:
    print 'certs keyfiles directory does not exist'
exit(1)
# Load template file
with open(curr_dir + "/templates/testplan_template.yml", 'r') as stream:
template = yaml.load(stream)
channel_create = template["CHANNEL_CREATE"]
# channel_join = template["CHANNEL_JOIN"]
chaincode_install = template["CHAINCODE_INSTALL"]
chaincode_instantiate = template["CHAINCODE_INSTANTIATE"]
chaincode_invoke = template["CHAINCODE_INVOKE"]
execute_command = template["EXECUTE_COMMAND"]
connectionProfile = {}
org_list = []
org_list_lowercase = []
orderer_list = []
peer_list = []
org_peers_dict = {}
org_anchor_dict ={}
allAnchor_list =[]
# Load connection profile
for orgName in os.listdir(work_dir + '/keyfiles'):
if os.path.isfile(work_dir + '/keyfiles/' + orgName + '/connection.yml'):
with open(work_dir + '/keyfiles/' + orgName + '/connection.yml', 'r') as stream:
connectionProfile = yaml.load(stream)
if connectionProfile["orderers"] is None:
continue
orderer_list = orderer_list + connectionProfile["orderers"].keys()
if (connectionProfile["organizations"][orgName.lower()]["peers"] != None):
org_list.append(orgName)
org_list_lowercase.append(orgName.lower())
org_peers_dict[orgName] = connectionProfile["organizations"][orgName.lower(
)]["peers"]
peer_list = peer_list + \
connectionProfile["organizations"][orgName.lower(
)]["peers"]
org_anchor_dict[orgName] = sorted(
connectionProfile["organizations"][orgName.lower(
)]["peers"])[0]
# When there is only peer or orderer, we skip tests.
if len(orderer_list) == 0 or len(peer_list) == 0:
outputfile =open(work_dir + '/testplan_example.yml','w')
outputfile.write("")
outputfile.close()
exit(0)
orderer_list = list(OrderedDict.fromkeys(orderer_list))
peer_list = list(OrderedDict.fromkeys(peer_list))
for orgName in org_list :
tempOrgAnchorObj={}
tempOrgAnchorObj[orgName+"Anchor"] = org_anchor_dict[orgName]
testplan_dict["peernodeAlias"].append(tempOrgAnchorObj)
tempOrgPeersObj={}
tempOrgPeersObj[orgName+"Peers"] = ','.join(org_peers_dict[orgName])
testplan_dict["peernodeAlias"].append(tempOrgPeersObj)
allAnchor_list.append(org_anchor_dict[orgName])
testplan_dict["peernodeAlias"].append({"allAnchors":','.join(allAnchor_list)})
testplan_dict["peernodeAlias"].append({"allPeers":','.join(peer_list)})
print 'org list: '
print org_list_lowercase
print 'orderer_list: '
print orderer_list
print 'peer_list: '
print peer_list
print 'allAnchor_list'
print allAnchor_list
# CREATE_CHANNEL
channel_create["parameters"]["connectionProfile"] = org_list[0]
if network_type == 'cello':
channel_create["parameters"]["channelConsortium"] = 'FabricConsortium'
else:
channel_create["parameters"]["channelConsortium"] = 'SampleConsortium'
channel_create["parameters"]["channelOrgs"] = ','.join(org_list_lowercase)
channel_create["parameters"]["ordererName"] = orderer_list[0]
testplan_dict["tests"].append(channel_create)
# JOIN_CHANNEL and INSTALL_CHAINCODE
join_list = []
install_list = []
for org in org_list:
channel_join = template["CHANNEL_JOIN"]
channel_join["parameters"]["connectionProfile"] = org
channel_join["parameters"]["peers"] = ','.join(org_peers_dict[org])
channel_join["parameters"]["ordererName"] = orderer_list[0]
join_list.append(str(channel_join))
# CHAINCODE_INSTALL
chaincode_install["parameters"]["connectionProfile"] = org
chaincode_install["parameters"]["peers"] = ','.join(org_peers_dict[org])
install_list.append(str(chaincode_install))
for join_org in join_list:
join_item = ast.literal_eval(join_org)
testplan_dict["tests"].append(join_item)
for install_org in install_list:
install_item = ast.literal_eval(install_org)
testplan_dict["tests"].append(install_item)
# CHAINCODE_INSTANTIATE
chaincode_instantiate["parameters"]["connectionProfile"] = org_list[0]
chaincode_instantiate["parameters"]["peers"] = ','.join(peer_list)
# CHAINCODE_INVOKE
# Invoke with fixed transaction count : 100
chaincode_invoke["iterationCount"] = '100'
chaincode_invoke["parameters"]["connectionProfile"] = org_list[0]
chaincode_invoke["parameters"]["peers"] = ','.join(peer_list)
chaincoode_invoke_count = str(chaincode_invoke)
# Invoke with fixed running duration : 0 hour 10 minutes 0 second.
# And enable running tests parallel by setting waitUntilFinish to true
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["waitUntilFinish"] = False
chaincoode_invoke_time = str(chaincode_invoke)
# Invoke with fixed running duration : 0 hour 10 minutes 0 second
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["parameters"]["peers"] = peer_list[0]
chaincoode_invoke_parallel = str(chaincode_invoke)
testplan_dict["tests"].append(chaincode_instantiate)
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_count))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_time))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_parallel))
# Execute command with default images
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))
# Execute command with customized image
execute_command["name"] = "execute-command-with-customized-image"
execute_command["container"] = "user/ownimage"
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))
connYamlStr= yaml.dump(testplan_dict,default_flow_style=False)
tempstr= connYamlStr
for orgName in org_list :
tempstr = tempstr.replace(orgName+"Anchor:",orgName+"Anchor: &"+orgName+"Anchor")
tempstr = tempstr.replace(orgName+"Peers:",orgName+"Peers: &"+orgName+"Peers")
tempstr = tempstr.replace("allAnchors:","allAnchors: &allAnchors")
tempstr = tempstr.replace("allPeers:","allPeers: &allPeers")
tempstr = tempstr.replace("runid:","runid: &runid")
if network_type == "ibp":
tempstr = tempstr.replace("networkid:","networkid: &networkid")
# Dump testplan file
outputfile =open(work_dir + '/testplan_example.yml','w')
outputfile.write(tempstr)
outputfile.close()
| StarcoderdataPython |
17804 | <reponame>euranova/estimating_eces
# -*- coding: utf-8 -*-
"""
@author: nicolas.posocco
"""
from abc import ABC
import numpy as np
class MulticlassDistribution(ABC):
def __init__(self):
"""
Initializes the distribution, allowing later sampling and posterior probabilities calculations.
"""
pass
def sample(self, n_samples, return_posterior=True, reproducible=None):
"""
Samples n_samples times from the distribution, and their label.
Returns also the array of posterior probabilities if return_posterior=True.
"""
# n_samples
assert type(n_samples) is int, "n_samples should be an integer."
assert n_samples > 0, "n_samples should be positive."
# return_posterior
assert type(return_posterior) is bool, "return_posterior should be a boolean."
# reproducible
        assert reproducible is None or isinstance(reproducible, np.random.RandomState), "reproducible should be None or a np.random.RandomState object."
raise NotImplementedError
def posteriors(self, X):
# X
assert type(X) is np.ndarray, "X should be a numpy array."
assert X.ndim == 2, "X should be of shape (n_samples, n_features), here is of shape {}".format(X.shape)
raise NotImplementedError
def get_bayes_classifier(self):
"""
        Instantiates the optimal Bayes classifier for this distribution.
"""
return BayesClassifier(distribution=self)
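# Illustrative concrete subclass (added as an example, not part of the original
# API): two unit-variance 1-D Gaussians with equal priors.
class TwoGaussians1D(MulticlassDistribution):

    def __init__(self, mu0=0.0, mu1=2.0):
        self.mu = np.array([mu0, mu1])

    def sample(self, n_samples, return_posterior=True, reproducible=None):
        rng = reproducible if reproducible is not None else np.random.RandomState()
        y = rng.randint(0, 2, size=n_samples)
        X = rng.normal(loc=self.mu[y], scale=1.0).reshape(-1, 1)
        if return_posterior:
            return X, y, self.posteriors(X)
        return X, y

    def posteriors(self, X):
        log_densities = -0.5 * (X - self.mu[None, :]) ** 2
        p = np.exp(log_densities - log_densities.max(axis=1, keepdims=True))
        return p / p.sum(axis=1, keepdims=True)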
class BayesClassifier(ABC):
def __init__(self, distribution):
# distribution
assert isinstance(distribution,
MulticlassDistribution), "distribution should inherit from MulticlassDistribution."
self.distribution = distribution
def fit(self, X):
pass
def predict_proba(self, X):
# X
assert type(X) is np.ndarray, "X should be a numpy array, here is a {}.".format(type(X))
assert X.ndim == 2, "X should be of shape (n_samples, n_features), here is of shape {}".format(X.shape)
posteriors = self.distribution.posteriors(X)
return posteriors
def predict(self, X):
# X
assert type(X) is np.ndarray, "X should be a numpy array, here is a {}.".format(type(X))
assert X.ndim == 2, "X should be of shape (n_samples, n_features), here is of shape {}".format(X.shape)
posteriors = self.predict_proba(X)
return np.argmax(posteriors, axis=0) | StarcoderdataPython |
1708519 | # Wifi Configuration
#
import machine
import network
import time
import uiot._cfg as _cfg
import unetrepl as nr
# activate wifi network
_ap = network.WLAN(network.AP_IF)
_ap.active(False)
_wlan = network.WLAN(network.STA_IF)
_wlan.active(True)
_wlan_laststate = False
# connect to wifi and really try to connect
def connect_blocking():
global _wlan
activate()
# no scan of networks to allow connect to hidden essid
# Try to connect
tries = 15
for i in range(tries):
print("%d/%d. Trying to connect." % (i + 1, tries))
machine.idle()
time.sleep(1)
if connected():
break
if connected():
print('Wifi: connection succeeded!')
print(_wlan.ifconfig())
else:
print('Wifi: connection failed, starting accesspoint!')
accesspoint()
nr.start(nostop=True)
def activate():
global _wlan
_ap.active(False)
_wlan.active(True)
if _cfg.config.wifi_name:
_wlan.connect(_cfg.config.wifi_name, _cfg.config.wifi_pw)
def deactivate():
global _wlan
_ap.active(False)
_wlan.active(False)
def monitor():
# needs to be called on regular basis
global _wlan_laststate
if _wlan_laststate != connected(): # connection status change
if _wlan_laststate: # network became inactive
            pass  # should be retried by the ESP in the background
else: # network became active
nr.start(nostop=True) # start netrepl
_wlan_laststate = not _wlan_laststate # there was a change, so toggle
# TODO: consider activating AP mode, if not active for long time
def accesspoint():
global _wlan
print('Wifi: activating accesspoint.')
_wlan.active(False)
_ap.active(True)
def connected():
return _wlan.isconnected() and _wlan.status() == network.STAT_GOT_IP
def config():
return _wlan.ifconfig()
class SCAN:
def __call__(self):
global _wlan
state = _wlan.active()
if not state:
_wlan.active(True)
nets = _wlan.scan()
_wlan.active(state)
return nets
def __repr__(self):
l = ""
for n in self.__call__():
l = l + n[0].decode() + " %ddB\n" % n[3]
return l
scan = SCAN()
class WIP:
def __repr__(self):
return config()[0]
def __call__(self):
return self.__repr__()
wip = WIP()
# write config and connect
def setup(name, password, reset=True):
if name != _cfg.config.wifi_name or \
password != _cfg.config.wifi_pw:
_cfg.wifi(name, password)
print("Updated wifi config.")
if reset:
print("Resetting system in 3 seconds.")
time.sleep(1)
nr.stop()
time.sleep(2)
machine.reset()
else:
activate()
# when module loaded the first time start blocking to also bring up netrepl at
# right time
connect_blocking()
| StarcoderdataPython |
19808 | # -*- coding: utf-8 -*-
from tinyq.app import Application # noqa
__version__ = '0.3.0'
__author__ = 'mozillazg'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2017 mozillazg'
| StarcoderdataPython |
1750468 | <gh_stars>1-10
"""Highly divisible triangular number
The sequence of triangle numbers is generated by adding the natural numbers.
The 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1, 3
6: 1, 2, 3, 6
10: 1, 2, 5, 10
15: 1, 3, 5, 15
21: 1, 3, 7, 21
28: 1, 2, 4, 7, 14, 28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
import math
def triangle_number_generator():
"""Generator yielding the sequence of triangle numbers."""
i = 0
while True:
i += 1
yield int(i * (i + 1) / 2)
def check_divisors(target):
"""Return the value of the first triangle number to have greater than the target number of divisors."""
triangles = triangle_number_generator()
for triangle in triangles:
divisors = 0
for i in range(1, int(math.sqrt(triangle) + 1)):
if triangle % i == 0:
divisors += 1
if i*i != triangle:
divisors += 1
if divisors > target:
return triangle
def check_divisors_alternate(target):
"""Return the value of the first triangle number to have greater than the target number of divisors.
Uses prime factorizations. Any integer N can be expressed as
    N = p_0^a_0 * p_1^a_1 * ... * p_n^a_n,
where p_n is a distinct prime number and a_n is its exponent. The number of divisors D(N) of any integer
N can be computed as
D(N) = (a_0 + 1) * (a_1 + 1) * ... * (a_n + 1)
"""
triangles = triangle_number_generator()
for triangle in triangles:
divisors = 1
number = triangle
for candidate in range(2, triangle):
exponent = 0
while number % candidate == 0:
exponent += 1
                number //= candidate
divisors *= exponent + 1
if divisors > target:
return triangle
if number == 1:
break
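if __name__ == "__main__":
    # Check against the worked example in the module docstring: 28 is the first
    # triangle number with more than five divisors. The 500-divisor call solves
    # the actual problem and may take a little while with trial division.
    print(check_divisors(5))             # 28
    print(check_divisors_alternate(5))   # 28
    print(check_divisors(500))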
| StarcoderdataPython |
3248148 | <reponame>derekray311511/permatrack
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from ..video_dataset import VideoDataset
class PDTracking(VideoDataset):
num_categories = 5
dataset_folder = 'pd'
default_resolution = [384, 960]
class_name = ['Pedestrian', 'Car', 'Cyclist', 'Caravan/RV', 'Truck']
# negative id is for "not as negative sample for abs(id)".
# 0 for ignore losses for all categories in the bounding box region
# ['Pedestrian', 'Car', 'Bicyclist', 'Bus', 'Caravan/RV', 'OtherMovable',
# 'Motorcycle', 'Motorcyclist', 'OtherRider', 'Train', 'Truck', 'Dontcare']
cat_ids = {1:1, 2:2, 3:3, 4:-9999, 5:4, 6:-2, 7:-9999, 8:-1, 9:-1, 10:-9999, 11:5}
max_objs = 500
def __init__(self, opt, split, rank=None):
data_dir = os.path.join(opt.data_dir, self.dataset_folder)
split_ = 'train' if opt.dataset_version != 'test' else 'test' #'test'
img_dir = data_dir
if split == 'train':
ann_file_ = "train"
else:
ann_file_ = 'val'
ann_path = os.path.join(
data_dir, 'annotations', 'tracking_{}.json'.format(
ann_file_))
self.images = None
super(PDTracking, self).__init__(opt, split, ann_path, img_dir)
self.box_size_thresh = [300, 500, 300, 500, 500]
if opt.only_ped:
self.num_categories = 1
self.class_name = ['person']
self.cat_ids = {1:1, 2:-9999, 3:-1, 4:-9999, 5:-9999, 6:-9999, 7:-9999, 8:-1, 9:-1, 10:-9999, 11:-9999}
self.box_size_thresh = [300]
if opt.nu:
self.num_categories = 8
self.class_name = ['Car', 'Truck', 'Bus', 'Trailer', 'construction_vehicle', 'Pedestrian', 'Motorcycle', 'Bicycle']
self.cat_ids = {1:6, 2:1, 3:0, 4:3, 5:1, 6:-1, 7:-7, 8:0, 9:0, 10:-9999, 11:2, 12:5, 13:-8}
self.box_size_thresh = [500, 500, 500, 500, 500, 300, 500, 500]
self.alpha_in_degree = False
self.depth_scale = 1
self.dep_mask = 0
self.dim_mask = 1
self.rot_mask = 0
self.amodel_offset_mask = 0
self.ignore_amodal = True
self.num_samples = len(self.images)
self.exp_id = opt.exp_id
if opt.const_v_over_occl:
self.const_v_over_occl = True
print('Loaded {} {} samples'.format(split, self.num_samples))
def save_results_ioueval(self, results, save_dir):
formattted_results = []
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for video in self.coco.dataset['videos']:
video_id = video['id']
images = self.video_to_images[video_id]
for image_info in images:
img_id = image_info['id']
if not (img_id in results):
continue
frame_id = image_info['frame_id']
for i in range(len(results[img_id])):
item = results[img_id][i]
if item['age'] != 1:
continue
if 'visibility' in item and not item['visibility']:
continue
category_id = item['class']
track_id = item['tracking_id'] if 'tracking_id' in item else -1
bbox = [item['bbox'][0].item(), item['bbox'][1].item(), item['bbox'][2].item() - item['bbox'][0].item(), item['bbox'][3].item() - item['bbox'][1].item()]
entry = {'video_id': video_id, 'image_id': img_id, 'category_id': category_id, 'track_id': track_id, 'bbox': bbox, 'score': item['score'].item()}
formattted_results.append(entry)
print(save_dir + '/iou_eval.json')
json.dump(formattted_results, open(save_dir + '/iou_eval.json', 'w'))
def run_eval(self, results, save_dir, write_to_file=False, dataset_version="val"):
self.save_results_ioueval(results, save_dir)
os.chdir("../tao")
command = 'python scripts/evaluation/evaluate.py ' + \
'../data/%s/annotations/tracking_%s_tao.json ' % (self.dataset_folder, dataset_version) + \
'{}/iou_eval.json'.format(save_dir) + ' --config-updates CATEGORIES 1,2'
if write_to_file:
print("Writing to file")
command += ' > ../exp/tracking/{}/eval_out.txt'.format(self.exp_id)
os.system(command)
def __len__(self):
return self.num_samples
def _to_float(self, x):
return float("{:.2f}".format(x))
| StarcoderdataPython |
1654504 | <filename>campbell_diaz/crop.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Wageningen-UR
# <NAME>, June 2020
from pcse.base import SimulationObject, ParamTemplate, RatesTemplate, StatesTemplate, VariableKiosk
from pcse.decorators import prepare_rates, prepare_states
from pcse.traitlets import Float,Int, Instance, Enum, Unicode
import math
from mpmath import mp
from array import array
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import pandas as pd
from .partitioning import DVS_Partitioning as Partitioning
from .wofost_soybean_phenology import SoybeanPhenology as Phenology
from pcse.util import AfgenTrait
def convert_cm_to_m(v):
return v/100.
def convert_KPa_to_hPa(p):
return p*10.
def convert_hPa_to_KPa(k):
return k/10.
def convert_ha_m2(m):
return m*10000
def convert_j_Mj(j):
return j/1000000
def convert_g_kg(g):
return g/1000
class Campbell(SimulationObject):
"""Parameters**
============ ================================================= ==== ========
Name Description Unit
============ ================================================= ==== ========
    DVV1         Parameter 1 for vapor deficit equation
    DVV2         Parameter 2 for vapor deficit equation
    DVV3         Parameter 3 for vapor deficit equation
NTR Nitrogen content in grain g.g-1
LNTR Nitrogen content in leaves g.g-1
FNTR Fraction of N translocated from leaves to seeds
HD Factor to standardize humidity content at 13%
K Light extinction coefficient (Kukal and Irmak, 2020)
Ppar Proportion of PAR in the total radiation
WUE Dry matter water ratio Pa
DSLA Specific leaf area for dead leaves m-2 kg-1
RUE Radiation use efficiency (Kukal and Irmak, 2020) g Mj m2
    GCC          Conversion coefficient (CHO to soybean seed)
    LAIC         Critical leaf area index                          m-2 m-2
FTRANSL Fraction of TDM to be translocated
RDRSHM Maximum relative death rate of leaves due to shading (LINTUL2) d-1
============ ================================================= ==== ========
    **Rates**
============ ================================================= ==== ========
Name Description Unit
============ ================================================= ==== ========
FI Fractional interception
PE Potential evaporation m
PT Potential transpiration m
    VDD          Correction for vapor pressure deficit             KPa
PARi Intercepted PAR Mj m-2
DM Rate of growth kg m-2
ROOT Growth rate root kg m-2
STEMS Growth rate stems kg m-2
LEAF Growth rate leaf kg m-2
SEED Growth rate storage organs kg m-2
TN Translocated nitrogen from leaves to grains kg m-2
DLEAF Senescence rate of leaf kg m-2
RDRSH Relative death rate of leaves due to shading (LINTUL2) d-1
RDRT Table of RDR as a function of temperature
============ ================================================= ==== ========
    **State variables:**
    ============ ================================================= ==== ========
     Name        Description                                       Unit
    ============ ================================================= ==== ========
TPE Total potential evaporation m
TPT Total potential transpiration m
TDM Total above-ground biomass kg m-2
TROOT Total weight of roots kg m-2
TSTEMS Total weight of stems kg m-2
TLEAF Total weight of leaves m-2 m-2
TSEED Total weight of storage organs kg m-2
LAI Leaf area index m-2 m-2
TDLEAF Total of dead leaves m-2 m-2
GLEAF Total of green leaves m-2 m-2
SLA Specific leaf area m-2 kg-1
============ ================================================= ==== ========
**External dependencies:**
======= =================================== ================= ============
Name Description Provided by Unit
======= =================================== ================= ============
FR Fraction partitioned to roots. Y -
FS Fraction partitioned to stems. Y -
FL Fraction partitioned to leaves. Y -
    FO      Fraction partitioned to storage organs.      Y            -
DVS Development stage
======= =================================== ================= ============
"""
# sub-model components for crop simulation
pheno = Instance(SimulationObject)
part = Instance(SimulationObject)
soil = Instance(SimulationObject)
class Parameters(ParamTemplate):
RDRT = AfgenTrait()
RDRSHM = Float(-99.)
LAIC = Float(-99.)
DVV1 = Float(-99.)
DVV2 = Float(-99.)
DVV3 = Float(-99.)
initLAI = Float(-99.)
K = Float(-99.)
Ppar = Float(-99.)
WUE = Float(-99.)
DSLA = Float(-99.)
NTR = Float(-99.)
LNTR = Float(-99.)
FNTR = Float(-99.)
HD = Float(-99.)
GCC = Float(-99.)
FTRANSL = Float(-99.)
SLATB = AfgenTrait()
RUE = Float(-99.)
RDMAX = Float(-99.)
class RateVariables(RatesTemplate):
RDRDV = Float(-99.)
RDRSH = Float(-99.)
RDR = Float(-99.)
DLAI = Float(-99.)
DM_W = Float(-99.)
DM_R = Float(-99.)
DM = Float(-99.)
PDM = Float(-99.)
VDD = Float(-99.)
FI = Float(-99.)
ROOT = Float(-99.)
STEMS = Float(-99.)
LEAF = Float(-99.)
WLEAF = Float(-99.)
SEED = Float(-99.)
PSEED = Float(-99.)
TN = Float(-99.)
WDLEAF = Float(-99.)
DLEAF = Float(-99.)
GLEAF = Float(-99.)
TRANSL = Float(-99.)
#root
RD = Float(-99.)
WD = Float(-99.)
class StateVariables(StatesTemplate):
TDM = Float(-99.)
TDMv = Float(-99.)
TSTEM = Float(-99.)
TLEAF = Float(-99.)
TSEED = Float(-99.)
YIELD = Float(-99.)
LAI = Float(-99.)
LAIFlowering = Float(-99.)
TDLEAF = Float(-99.)
SLA = Float(-99.)
TDMFlowering = Float(-99.)
TDMTRANSL = Float(-99.)
POOLTRSL = Float(-99.)
#root
TRD = Float(-99.)
        da = Int(-99)
CWDv = Float(-99.)
CWDr = Float(-99.)
def initialize(self, day, kiosk, parametervalues):
self.params = self.Parameters(parametervalues)
self.rates = self.RateVariables(kiosk, publish=["FI"])
self.kiosk = kiosk
# Initialize components of the crop
self.pheno = Phenology(day, kiosk, parametervalues)
self.part = Partitioning(day, kiosk, parametervalues)
DVS = self.kiosk["DVS"]
SLA = self.params.SLATB(DVS)
# =============================================================================
# # Initial total (living+dead) above-ground biomass of the crop
# FR = self.kiosk["FR"]
# FS = self.kiosk["FS"]
# FL = self.kiosk["FL"]
# FO = self.kiosk["FO"]
# =============================================================================
self.states = self.StateVariables(kiosk, publish=["TRD"],
            TDM=0.00, TDMv=0, TSTEM=0.0,
TLEAF=0.0,TSEED=0.0,YIELD=0.0,
LAI=self.params.initLAI, TDLEAF =0, SLA=SLA, TRD=0.0,da=0,
TDMFlowering=None, LAIFlowering=None, TDMTRANSL=0,POOLTRSL=0, CWDv=0.,CWDr=0.)
@prepare_rates
def calc_rates(self, day, drv):
p = self.params
r = self.rates
s = self.states
k = self.kiosk
self.pheno.calc_rates(day, drv)
crop_stage = self.pheno.get_variable("STAGE")
# if before emergence there is no need to continue
# because only the phenology is running.
if crop_stage == "emerging":
return
self.part.calc_rates(day, drv)
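        # vapor pressure deficit proxy (KPa): diurnal temperature range scaled by a
        # quadratic in the daily mean temperature (parameters DVV1..DVV3)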
r.VDD = convert_hPa_to_KPa(drv.TMAX - drv.TMIN)*((p.DVV1*drv.TEMP+ p.DVV2)*drv.TEMP+ p.DVV3)
print("VDD=", r.VDD)
r.FI = 1. - mp.exp(-p.K * s.LAI)
r.PARi = convert_j_Mj(drv.IRRAD) * p.Ppar * r.FI
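        # daily growth is the minimum of a transpiration-limited rate (Ta * WUE / VDD)
        # and a radiation-limited rate (RUE * intercepted PAR)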
if k.DVS < 2:
if "Ta" not in self.kiosk:
k.Ta = 0.001
else:
k.Ta = self.kiosk["Ta"]
if "W_Stress" not in self.kiosk:
k.W_Stress = 0.0
else:
k.W_Stress = self.kiosk["W_Stress"]
if "PTa" not in self.kiosk:
k.PTa = 0.0
else:
k.PTa = self.kiosk["PTa"]
r.DM_W = k.Ta * (p.WUE/r.VDD)
print("Ta=", k.Ta)
print("DM=", r.DM_W)
r.DM_R = convert_g_kg(r.PARi * p.RUE )
r.DM = min(r.DM_W, r.DM_R)
r.PDM = k.PTa * (p.WUE/r.VDD)
r.STEMS = r.DM * k.FS
r.WLEAF = r.DM * k.FL
r.LEAF = r.DM * k.FL * convert_ha_m2(s.SLA)
r.PSEED = r.PDM * k.FO
# Biomass reallocated from vegetative to seed
if s.TDMTRANSL>0:
r.TRANSL = r.PSEED - (r.DM * k.FO)
r.SEED = (r.DM * k.FO) + r.TRANSL
        else:
            r.SEED = r.DM * k.FO
# Senescence from N translocation
r.TN = ((r.SEED*p.NTR)/p.FNTR)
#senescence rate from LINTUL
if k.DVS>1.5:
r.RDRDV = p.RDRT(drv.TEMP)
            r.RDRSH = p.RDRSHM * ((s.LAI - p.LAIC) / p.LAIC)
            # clamp the shading-driven death rate to the range [0, 0.03] d-1
            r.RDRSH = max(0., min(r.RDRSH, 0.03))
        else:
            r.RDRDV = 0
r.RDR = max(r.RDRDV, r.RDRSH)
r.DLAI = s.LAI * r.RDR
r.WDLEAF = r.TN/p.LNTR
r.DLEAF = r.WDLEAF * p.DSLA
r.GLEAF = r.LEAF - max(r.DLAI, r.DLEAF)
#Rooting growth
r.RD = p.RDMAX * (1./(1+44.2*math.exp(-15*(s.da)/(140))))
r.WD = k.PTa - k.Ta
@prepare_states
def integrate(self,day,delt):
p = self.params
r = self.rates
s = self.states
k = self.kiosk
# crop stage before integration
crop_stage = self.pheno.get_variable("STAGE")
self.pheno.integrate(day, delt=1.0)
# if before emergence there is no need to continue
# because only the phenology is running.
# Just run a touch() to to ensure that all state variables are available
# in the kiosk
if crop_stage == "emerging":
self.touch()
return
self.part.integrate(day, delt=1.0)
DVS = self.kiosk["DVS"]
s.SLA = p.SLATB(DVS)
s.TDM += r.DM
print("TDM", s.TDM)
s.TDMv += r.STEMS + r.WLEAF
if s.TDMFlowering is None and k.DVS >= 1.:
s.TDMFlowering = s.TDMv
s.TDMTRANSL = s.TDMFlowering * p.FTRANSL
s.TDMTRANSL -= r.TRANSL
s.TSEED += r.SEED
s.YIELD = s.TSEED * p.GCC * p.HD
s.TSTEM += r.STEMS
s.TLEAF += r.DM * k.FL
s.LAI += r.GLEAF
if s.LAIFlowering is None and k.DVS >= 1.3:
s.LAIFlowering = s.LAI
s.da+=1
s.TRD = r.RD
if k.DVS<1:
s.CWDv += r.WD
else:
s.CWDr += r.WD
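# Note: Campbell publishes only FI and TRD; it expects another component (e.g. the soil
# water balance held in the `soil` attribute) to publish Ta, PTa and W_Stress into the
# kiosk, which calc_rates reads each time step.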
| StarcoderdataPython |
3388466 | import platform
import casual.make.entity.target as target
import casual.make.tools.environment as environment
import os
import subprocess
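# Helper utilities for casual-make build recipes: list/path assembly, build metadata
# flags (version and git commit hash) and tool command lines resolved from environment
# variables with sensible defaults.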
def add_item_to_list(items, item):
new_list = []
if not items:
return new_list
for i in items:
if isinstance(i, target.Target):
new_list.append(item + i.name())
else:
new_list.append(item + i)
return new_list
def verify_type(name):
if not isinstance(name, str):
raise SystemError("Can't call this method with " + str(type(name)))
def assemble_path(sub_directory, name, main_directory=None, prefix="", suffix=""):
if main_directory:
assembled = os.path.join(
main_directory, sub_directory, prefix + name + suffix)
else:
assembled = os.path.join(sub_directory, prefix + name + suffix)
return assembled
def casual_build_version():
if environment.get("CASUAL_MAKE_BUILD_VERSION"):
return ["-DCASUAL_MAKE_BUILD_VERSION=\"" + environment.get("CASUAL_MAKE_BUILD_VERSION") + "\""]
else:
return []
def casual_build_commit_hash():
try:
githash = subprocess.check_output(
["git", "rev-parse", "HEAD"]).rstrip().decode()
return ["-DCASUAL_MAKE_COMMIT_HASH=\"" + githash + "\""]
except:
return []
def optional_flags():
return environment.get("OPTIONAL_FLAGS", "").split()
def cxx():
return ["g++"] if not environment.get("CXX") \
else environment.get("CXX").split()
def lint_command():
return ["clang-tidy"] if not environment.get("LINT_COMMAND") \
else environment.get("LINT_COMMAND").split()
def lint_pre_directives():
return ["-quiet", "-config", "''", "--"] if not environment.get("LINT_PRE_DIRECTIVES") \
else environment.get("LINT_PRE_DIRECTIVES").split()
def executable_linker():
return cxx() if not environment.get("EXECUTABLE_LINKER") \
else environment.get("EXECUTABLE_LINKER").split()
def archive_linker():
return ["ar", "rcs"]
def cpp_standard():
if platform.system().startswith('CYGWIN'):
return ["-std=gnu++17"]
else:
return ["-std=c++17"]
def optional_possible_flags():
return ["-fdiagnostics-color=always"]
| StarcoderdataPython |
3394726 | from django.conf.urls import url
from rest_framework import permissions
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.generics import ListAPIView, RetrieveAPIView
from medical_peek_core.service.url import UrlProperty
from medical_peek_core.model.dmo.medical_item import MedicalItem, MedicalItemSerializer
class MedicalItemDetail(RetrieveAPIView, UrlProperty):
"""
Detailed Medical Item Information
"""
queryset = MedicalItem.objects.all()
serializer_class = MedicalItemSerializer
permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,)
authentication_classes = (SessionAuthentication, TokenAuthentication,)
def __get_urls(self, prefix = r'medical-item/'):
url_patterns = [
url(regex = prefix + r'(?P<pk>[0-9]+)/?$', view = MedicalItemDetail.as_view()),
]
return url_patterns
@property
def urls(self):
return self.__get_urls()
class MedicalItemList(ListAPIView, UrlProperty):
"""
All Detailed Medical Item Information
"""
queryset = MedicalItem.objects.all()
serializer_class = MedicalItemSerializer
permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,)
authentication_classes = (SessionAuthentication, TokenAuthentication,)
def __get_urls(self, prefix = r'medical-item/?'):
url_patterns = [
url(regex = rf'{prefix}$', view = MedicalItemList.as_view()),
]
return url_patterns
@property
def urls(self):
return self.__get_urls()
medical_item_detail = MedicalItemDetail()
medical_item_list = MedicalItemList()
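# Illustrative wiring note (assumption, not in the original file): a project-level
# urls.py could extend its patterns with the UrlProperty instances above, e.g.
#   urlpatterns += medical_item_list.urls + medical_item_detail.urls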
| StarcoderdataPython |
1732154 | import matplotlib.pyplot as plt
import jieba
import xlrd
from wordcloud import WordCloud, STOPWORDS
from PIL import Image
import numpy as np
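# Build a word cloud from tag frequencies: beTXT() expands (tag, count) rows from an
# Excel sheet into a space-separated text file, and create() renders that text with a
# mask image, a Chinese font (simsun.ttf) and custom stopwords via the wordcloud package.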
class wcd:
# workBook = xlrd.open_workbook('../data/TagSupplement2.xlsx')
def beTXT(self):
global file_handle
allSheetNames = self.workBook.sheet_names()
print(allSheetNames)
        # 1.2 Get the sheet name by index (string type)
sheet1Name = self.workBook.sheet_names()[0]
print(sheet1Name)
        # 2. Read the sheet contents
        ## 2.1 Method 1: get a sheet's contents by index
        sheet1_content1 = self.workBook.sheet_by_index(0)  # sheet indices start at 0
for n in range(1,sheet1_content1.nrows):
x=sheet1_content1.cell(n,1).value
file_handle = open('wd.txt', mode='a')
for m in range(0,int(sheet1_content1.cell(n,2).value)):
file_handle.write(x+" ")
return file_handle
def create(self):
txt=open('../map/wd.txt','r').read()
mask_pic = Image.open("u=1302885550,4025528368&fm=26&gp=0.png")
mask_pic_array = np.array(mask_pic)
plt.figure(figsize=(16, 9))
stopwords = set(STOPWORDS)
stopwords.add("美国")
stopwords.add("说")
stopwords.add("没")
stopwords.add("没有")
wordcloud = WordCloud(font_path="simsun.ttf",
mask=mask_pic_array,
stopwords=stopwords,
collocations=False,
background_color="white").generate(txt)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
# plt.savefig('口罩词云.jpg')
plt.show()
if __name__ == '__main__':
    x = wcd()
    x.create()
| StarcoderdataPython |
1668617 | #
# ex03.py
# Python the Hard Way Exercise #3B
#
# Created by <NAME> on 12/23/14.
# Copyright (c) 2014 ddApps. All rights reserved.
# ------------------------------------------------
# Study Drill Question 3:
# Find something you need to calculate and write a new .py file that does it.
# import the Python math Standard Library
import math
print "Let's calculate the Area of a Circle with radius of 5."
# Area = pi x radius squared
print math.pi * pow(5, 2) | StarcoderdataPython |
3226699 | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/Treemaker/Splitter/',1)[0])+'/cfipython/slc6_amd64_gcc493/Treemaker/Splitter')
| StarcoderdataPython |
1623988 | <reponame>LeeBeral/python
x = 'abc'
y = 'def'
z = ['d','e','f']
print(x.join(y))
print(x.join(z)) | StarcoderdataPython |
3320489 | default_app_config = 'oscar.apps.voucher.config.VoucherConfig'
| StarcoderdataPython |
3206071 | #This is the flitesensor init file
| StarcoderdataPython |
108831 | # -*- coding: utf-8 -*-
"""
Created on 2021/4/25 10:22 AM
---------
@summary: Convert cookies copied from a browser into requests-style cookies
---------
@author: Boris
@email: <EMAIL>
"""
import json
import pyperclip
from feapder.utils.tools import get_cookies_from_str, print_pretty
class CreateCookies:
def get_data(self):
"""
        @summary: Read the copied cookie text from the clipboard
---------
---------
@result:
"""
input("请复制浏览器cookie (列表或字符串格式), 复制后按任意键读取剪切板内容\n")
text = pyperclip.paste()
print(text + "\n")
return text
def create(self):
data = self.get_data()
cookies = {}
try:
data_json = json.loads(data)
for data in data_json:
cookies[data.get("name")] = data.get("value")
except:
cookies = get_cookies_from_str(data)
print_pretty(cookies)
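# Illustrative usage sketch (not part of the original file): run interactively, copy the
# browser cookie to the clipboard when prompted, and the parsed dict is pretty-printed.
if __name__ == "__main__":
    CreateCookies().create()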
| StarcoderdataPython |
171030 | # Source:
# https://www.kaggle.com/sachinsharma1123/otto-group-classification-acc-82
from sklearn.pipeline import Pipeline
dataset = "otto"
metric = "neg_log_loss"
def make_pipeline():
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# focused on SVC pipeline
# to provide some diversity
# (i.e. not just XGB pipelines)
# probability=True needed for neg_log_loss
clf = SVC(probability=True, random_state=0)
p = Pipeline([("clf", clf)])
return p
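# Illustrative usage sketch (not part of the original notebook): smoke-test the pipeline
# on a small synthetic multi-class problem standing in for the Otto data.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_val_score

    X, y = make_classification(n_samples=300, n_features=20, n_informative=6,
                               n_classes=3, random_state=0)
    # neg_log_loss needs class probabilities, hence SVC(probability=True) above
    print(cross_val_score(make_pipeline(), X, y, scoring=metric, cv=3))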
| StarcoderdataPython |
1694435 | from .models.db_init import db
from .app import application
from .routes import api
from .logger import Logger
__all__ = [
'db',
'application',
'api',
'Logger'
]
| StarcoderdataPython |
59460 | <reponame>PacktPublishing/-Data-Wrangling-with-Python-3.x<filename>Section 2/2.1/code1.py
import xlrd
book = xlrd.open_workbook('SampleSuperstore.xls')
print("The number of sheets in the above excel file are {}".format(book.nsheets))
print("The names of sheets in the above excel file are {}".format(book.sheet_names()))
sheet1 = book.sheet_by_name('People')
sheet2 = book.sheet_by_index(0)
for i in range(sheet1.nrows):
print(sheet1.row(i))
| StarcoderdataPython |