Dataset column schema (one row per source file; ranges are min to max; ⌀ = nullable):

  hexsha: string (40) | size: int64 (5 to 2.06M) | ext: string (10 classes) | lang: string (1 class)
  max_stars_repo_path: string (3 to 248) | max_stars_repo_name: string (5 to 125) | max_stars_repo_head_hexsha: string (40 to 78) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (24) ⌀ | max_stars_repo_stars_event_max_datetime: string (24) ⌀
  max_issues_repo_path: string (3 to 248) | max_issues_repo_name: string (5 to 125) | max_issues_repo_head_hexsha: string (40 to 78) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (24) ⌀ | max_issues_repo_issues_event_max_datetime: string (24) ⌀
  max_forks_repo_path: string (3 to 248) | max_forks_repo_name: string (5 to 125) | max_forks_repo_head_hexsha: string (40 to 78) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (24) ⌀ | max_forks_repo_forks_event_max_datetime: string (24) ⌀
  content: string (5 to 2.06M) | avg_line_length: float64 (1 to 1.02M) | max_line_length: int64 (3 to 1.03M) | alphanum_fraction: float64 (0 to 1)
  count_classes: int64 (0 to 1.6M) | score_classes: float64 (0 to 1) | count_generators: int64 (0 to 651k) | score_generators: float64 (0 to 1) | count_decorators: int64 (0 to 990k) | score_decorators: float64 (0 to 1) | count_async_functions: int64 (0 to 235k) | score_async_functions: float64 (0 to 1) | count_documentation: int64 (0 to 1.04M) | score_documentation: float64 (0 to 1)
Row 1 | hexsha: 7335cb0ff48cfe398a0b353de5f1570850d9c8fa | size: 3,752 | ext: py | lang: Python
path: frappe/core/doctype/sms_settings/sms_settings.py | repo: ektai/erp2Dodock | head_hexsha: 5ad64b01cba9b07437f9a27751101258679379e8 | licenses: ["MIT"]
stars: null | issues: null | forks: null

# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw, msgprint
from frappe.utils import nowdate
from frappe.model.document import Document
import six
from six import string_types
class SMSSettings(Document):
pass
def validate_receiver_nos(receiver_list):
validated_receiver_list = []
for d in receiver_list:
if not d:
break
# remove invalid character
for x in [' ','-', '(', ')']:
d = d.replace(x, '')
validated_receiver_list.append(d)
if not validated_receiver_list:
throw(_("Please enter valid mobile nos"))
return validated_receiver_list
@frappe.whitelist()
def get_contact_number(contact_name, ref_doctype, ref_name):
"returns mobile number of the contact"
number = frappe.db.sql("""select mobile_no, phone from tabContact
where name=%s
and exists(
select name from `tabDynamic Link` where link_doctype=%s and link_name=%s
)
""", (contact_name, ref_doctype, ref_name))
return number and (number[0][0] or number[0][1]) or ''
@frappe.whitelist()
def send_sms(receiver_list, msg, sender_name = '', success_msg = True):
import json
if isinstance(receiver_list, string_types):
receiver_list = json.loads(receiver_list)
if not isinstance(receiver_list, list):
receiver_list = [receiver_list]
receiver_list = validate_receiver_nos(receiver_list)
arg = {
'receiver_list' : receiver_list,
'message' : frappe.safe_decode(msg).encode('utf-8'),
'success_msg' : success_msg
}
if frappe.db.get_value('SMS Settings', None, 'sms_gateway_url'):
send_via_gateway(arg)
else:
msgprint(_("Please Update SMS Settings"))
def send_via_gateway(arg):
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
headers = get_headers(ss)
use_json = headers.get("Content-Type") == "application/json"
message = frappe.safe_decode(arg.get('message'))
args = {ss.message_parameter: message}
for d in ss.get("parameters"):
if not d.header:
args[d.parameter] = d.value
success_list = []
for d in arg.get('receiver_list'):
args[ss.receiver_parameter] = d
status = send_request(ss.sms_gateway_url, args, headers, ss.use_post, use_json)
if 200 <= status < 300:
success_list.append(d)
if len(success_list) > 0:
args.update(arg)
create_sms_log(args, success_list)
if arg.get('success_msg'):
frappe.msgprint(_("SMS sent to following numbers: {0}").format("\n" + "\n".join(success_list)))
def get_headers(sms_settings=None):
if not sms_settings:
sms_settings = frappe.get_doc('SMS Settings', 'SMS Settings')
headers={'Accept': "text/plain, text/html, */*"}
for d in sms_settings.get("parameters"):
if d.header == 1:
headers.update({d.parameter: d.value})
return headers
def send_request(gateway_url, params, headers=None, use_post=False, use_json=False):
import requests
if not headers:
headers = get_headers()
kwargs = {"headers": headers}
if use_json:
kwargs["json"] = params
elif use_post:
kwargs["data"] = params
else:
kwargs["params"] = params
if use_post:
response = requests.post(gateway_url, **kwargs)
else:
response = requests.get(gateway_url, **kwargs)
response.raise_for_status()
return response.status_code
# Create SMS Log
# =========================================================
def create_sms_log(args, sent_to):
sl = frappe.new_doc('SMS Log')
sl.sent_on = nowdate()
sl.message = args['message'].decode('utf-8')
sl.no_of_requested_sms = len(args['receiver_list'])
sl.requested_numbers = "\n".join(args['receiver_list'])
sl.no_of_sent_sms = len(sent_to)
sl.sent_to = "\n".join(sent_to)
sl.flags.ignore_permissions = True
	sl.save()

avg_line_length: 27.188406 | max_line_length: 98 | alphanum_fraction: 0.711354 | count_classes: 34 | score_classes: 0.009062 | count_generators: 0 | score_generators: 0 | count_decorators: 1,008 | score_decorators: 0.268657 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 923 | score_documentation: 0.246002
Row 2 | hexsha: 7335d3017f92ccc28bd13ffbbbef33f7a8f4f467 | size: 481 | ext: py | lang: Python
path: blog/migrations/0041_auto_20190504_0855.py | repo: akindele214/181hub_2 | head_hexsha: 48b8814b5f66ad87f9a54721506076ddf70fe9bc | licenses: ["MIT"]
stars: 1 (2020-05-20T08:42:49.000Z to 2020-05-20T08:42:49.000Z) | issues: 14 (2020-03-24T17:31:08.000Z to 2022-03-11T23:59:30.000Z) | forks: 1 (2020-04-13T12:37:37.000Z to 2020-04-13T12:37:37.000Z)

# Generated by Django 2.1.5 on 2019-05-04 07:55
import blog.formatChecker
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0040_auto_20190504_0840'),
]
operations = [
migrations.AlterField(
model_name='videos',
name='video',
field=models.FileField(blank=True, null=True, upload_to='uploads/', validators=[blog.formatChecker.file_size]),
),
]
avg_line_length: 24.05 | max_line_length: 123 | alphanum_fraction: 0.638254 | count_classes: 362 | score_classes: 0.752599 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 103 | score_documentation: 0.214137
Row 3 | hexsha: 73360c2d69e50730324e4dc6677481e54cc8e26d | size: 1,850 | ext: py | lang: Python
path: tardis/model/tests/test_csvy_model.py | repo: Youssef15015/tardis | head_hexsha: adde5b0114f23634fe5afef6937b285174ad6b55 | licenses: ["BSD-3-Clause"]
stars: null | issues: 2 (2019-06-10T11:24:50.000Z to 2019-06-18T17:28:59.000Z) | forks: 1 (2019-06-10T10:21:41.000Z to 2019-06-10T10:21:41.000Z)

import numpy as np
import numpy.testing as npt
import tardis
import os
from astropy import units as u
from tardis.io.config_reader import Configuration
from tardis.model import Radial1DModel
import pytest
DATA_PATH = os.path.join(tardis.__path__[0],'model','tests','data')
@pytest.fixture(scope="module", params=['config_csvy_full.yml',
'config_csvy_nocsv_branch85.yml',
'config_csvy_nocsv_uniform.yml',
'config_csvy_nocsv_powerlaw.yml',
'config_csvy_nocsv_exponential.yml',
'config_csvy_full_rad.yml'])
def full_filename(request):
return os.path.join(DATA_PATH, request.param)
def test_compare_models(full_filename):
tardis_config = Configuration.from_yaml(full_filename)
csvy_model = Radial1DModel.from_csvy(tardis_config)
config_model = Radial1DModel.from_config(tardis_config)
csvy_model_props = csvy_model.get_properties().keys()
config_model_props = config_model.get_properties().keys()
npt.assert_array_equal(csvy_model_props, config_model_props)
for prop in config_model_props:
csvy_model_val = csvy_model.get_properties()[prop]
config_model_val = config_model.get_properties()[prop]
if prop == 'homologous_density':
npt.assert_array_almost_equal(csvy_model_val.density_0.value, config_model_val.density_0.value)
npt.assert_array_almost_equal(csvy_model_val.time_0.value, config_model_val.time_0.value)
else:
if hasattr(config_model_val, 'value'):
config_model_val = config_model_val.value
csvy_model_val = csvy_model_val.value
npt.assert_array_almost_equal(csvy_model_val, config_model_val)
avg_line_length: 44.047619 | max_line_length: 107 | alphanum_fraction: 0.682162 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 508 | score_decorators: 0.274595 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 233 | score_documentation: 0.125946
Row 4 | hexsha: 73391ce9c005d2972ce3d22ec1870d858657b9ce | size: 34,911 | ext: py | lang: Python
path: wepppy/taudem/topaz_emulator.py | repo: hwbeeson/wepppy | head_hexsha: 6358552df99853c75be8911e7ef943108ae6923e | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null

from typing import List
import os
import json
from os.path import join as _join
from os.path import exists as _exists
import math
from osgeo import gdal, osr
import numpy as np
from scipy.ndimage import label
from subprocess import Popen, PIPE
from pprint import pprint
from wepppy.all_your_base.geo import read_tif, centroid_px
from wepppy.watershed_abstraction.wepp_top_translator import WeppTopTranslator
from wepppy.watershed_abstraction.support import (
cummnorm_distance, compute_direction, representative_normalized_elevations,
weighted_slope_average, rect_to_polar, write_slp, HillSummary, ChannelSummary, CentroidSummary,
slp_asp_color, polygonize_netful, polygonize_bound, polygonize_subcatchments, json_to_wgs
)
from .taudem import TauDEMRunner
_USE_MPI = False
_DEBUG = False
class Node:
def __init__(self, tau_id, network):
self.data = tau_id
d = network[tau_id]
self.top = top = d['top']
self.bottom = bottom = d['bottom']
links = d['links']
if len(links) == 2:
refvec = np.array(bottom, dtype=float) - np.array(top, dtype=float)
links = sorted([dict(tau_id=_id, point=network[_id]['top'], origin=top, refvec=refvec)
for _id in links], key=lambda _d: rect_to_polar(_d))
links = [_d['tau_id'] for _d in links]
if len(links) > 0:
self.left = Node(links[0], network)
else:
self.left = None
if len(links) > 1:
self.right = Node(links[1], network)
else:
self.right = None
class TauDEMTopazEmulator(TauDEMRunner):
def __init__(self, wd, dem, vector_ext='geojson'):
super(TauDEMTopazEmulator, self).__init__(wd, dem, vector_ext)
# subwta
@property
def _subwta(self):
return _join(self.wd, 'subwta.tif')
# subwta
@property
def _subwta_shp(self):
return _join(self.wd, 'subwta.geojson')
# subcatchments
@property
def _subcatchments_shp(self):
return _join(self.wd, 'subcatchments.geojson')
# bound
@property
def _bound(self):
return _join(self.wd, 'bound.tif')
# bound
@property
def _bound_shp(self):
return _join(self.wd, 'bound.geojson')
# net
@property
def _netful_shp(self):
return _join(self.wd, 'netful.geojson')
@property
def _channels(self):
return _join(self.wd, 'channels.tif')
def topaz2tau_translator_factory(self):
d = self.tau2topaz_translator_factory()
return {v: k for k, v in d.items()}
def run_streamnet(self, single_watershed=False):
super(TauDEMTopazEmulator, self).run_streamnet(single_watershed=single_watershed)
tau2top_translator = self.tau2topaz_translator_factory()
with open(self._net) as fp:
js = json.load(fp)
for i, feature in enumerate(js['features']):
topaz_id = tau2top_translator[feature['properties']['WSNO']]
js['features'][i]['properties']['TopazID'] = int(str(topaz_id) + '4')
with open(self._net, 'w') as fp:
json.dump(js, fp)
cmd = ['gdal_rasterize', '-a', 'TopazID', '-a_nodata', '0',
'-a_srs', 'epsg:{}'.format(self.epsg),
'-te', self.ul_x, self.lr_y, self.lr_x, self.ul_y,
'-tr', self.cellsize, self.cellsize,
'-ot', 'UInt16', self._net, self._channels]
cmd = [str(v) for v in cmd]
print(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
p.wait()
assert _exists(self._channels)
def build_channels(self, csa=None):
if csa is None:
csa = 100
wd = self.wd
self.run_pitremove()
self.run_d8flowdir()
self.run_aread8()
self.run_gridnet()
self.run_src_threshold(threshold=csa)
polygonize_netful(self._src, self._netful_shp)
def set_outlet(self, lng, lat):
self.run_moveoutletstostrm(lng=lng, lat=lat)
def build_subcatchments(self, threshold=None):
self.run_peukerdouglas()
self.run_peukerdouglas_stream_delineation(threshold=threshold)
self.run_streamnet()
self.run_dinfflowdir()
self.run_areadinf()
self.run_dinfdistdown()
json_to_wgs(self._net)
self.delineate_subcatchments()
polygonize_subcatchments(self._subwta, self._subwta_shp, self._subcatchments_shp)
self.make_bound()
polygonize_bound(self._bound, self._bound_shp)
def abstract_watershed(self, wepp_chn_type,
clip_hillslopes=False, clip_hillslope_length=300.0):
self.abstract_channels(wepp_chn_type=wepp_chn_type)
self.abstract_subcatchments(clip_hillslopes=clip_hillslopes,
clip_hillslope_length=clip_hillslope_length)
self.abstract_structure()
@property
def _abstracted_channels(self):
return _join(self.wd, 'channels.json')
@property
def abstracted_channels(self):
with open(self._abstracted_channels) as fp:
summaries = json.load(fp)
translator = self.translator
chns_summary = {}
for topaz_id, d in summaries.items():
wepp_id = translator.wepp(top=topaz_id)
chn_enum = translator.chn_enum(top=topaz_id)
slope_scalar = d['slope_scalar']
aspect = d['aspect']
chns_summary[topaz_id] = \
ChannelSummary(
topaz_id=topaz_id,
wepp_id=wepp_id,
chn_enum=chn_enum,
chn_type=d['wepp_chn_type'],
isoutlet=d['isoutlet'],
length=d['length'],
width=d['width'],
order=d['order'],
aspect=aspect,
head=d['head'],
tail=d['tail'],
direction=d['direction'],
slope_scalar=slope_scalar,
color=slp_asp_color(slope_scalar, aspect),
area=d['area'],
elevs=d['elevs'],
distance_p=d['distance_p'],
slopes=d['slopes'],
centroid=CentroidSummary(
px=d['centroid_px'],
lnglat=d['centroid_lnglat']
)
)
return chns_summary
@property
def _abstracted_subcatchments(self):
return _join(self.wd, 'subcatchments.json')
@property
def abstracted_subcatchments(self):
with open(self._abstracted_subcatchments) as fp:
summaries = json.load(fp)
translator = self.translator
subs_summary = {}
for topaz_id, d in summaries.items():
wepp_id = translator.wepp(top=topaz_id)
slope_scalar = d['slope_scalar']
aspect = d['aspect']
subs_summary[topaz_id] = \
HillSummary(topaz_id=topaz_id,
wepp_id=wepp_id,
w_slopes=d['w_slopes'],
length=d['length'],
width=d['width'],
area=d['area'],
direction=d['direction'],
elevs=d['elevs'],
aspect=aspect,
slope_scalar=slope_scalar,
color=slp_asp_color(slope_scalar, aspect),
distance_p=d['distance_p'],
centroid=CentroidSummary(
px=d['centroid_px'],
lnglat=d['centroid_lnglat']
),
fp_longest=d['fp_longest'],
fp_longest_length=d['fp_longest_length'],
fp_longest_slope=d['fp_longest_slope']
)
return subs_summary
@property
def _structure(self):
return _join(self.wd, 'structure.tsv')
@property
def structure(self):
with open(self._structure) as fp:
return [[int(v) for v in line.split()] for line in fp.readlines()]
def abstract_channels(self, wepp_chn_type=None):
cellsize = self.cellsize
cellsize2 = self.cellsize2
translator = self.translator
        slopes = self.data_fetcher('dinf_slope', dtype=np.float64)  # np.float is a removed NumPy alias
        fvslop = self.data_fetcher('dinf_angle', dtype=np.float64)
with open(self._net) as fp:
js = json.load(fp)
chn_d = {}
for feature in js['features']:
topaz_id = int(str(feature['properties']['TopazID'])[:-1])
catchment_id = feature['properties']['WSNO']
uslinkn01 = feature['properties']['USLINKNO1']
uslinkn02 = feature['properties']['USLINKNO2']
dslinkn0 = feature['properties']['DSLINKNO']
order = feature['properties']['strmOrder']
chn_id = int(str(topaz_id) + '4')
enz_coords = feature['geometry']['coordinates'] # listed bottom to top
# need to identify unique pixels
px_last, py_last = None, None
indx, indy = [], []
for e, n, z in enz_coords:
px, py = self.utm_to_px(e, n)
if px != px_last or py != py_last:
assert 0 <= px < slopes.shape[0], ((px, py), (e, n), slopes.shape)
assert 0 <= py < slopes.shape[1], ((px, py), (e, n), slopes.shape)
indx.append(px)
indy.append(py)
px_last, py_last = px, py
            # the pixels are listed bottom to top; we want them top to bottom, as if we walked down the flowpath
indx = indx[::-1]
indy = indy[::-1]
flowpath = np.array([indx, indy]).T
_distance = flowpath[:-1, :] - flowpath[1:, :]
distance = np.sqrt(np.power(_distance[:, 0], 2.0) +
np.power(_distance[:, 1], 2.0))
slope = np.array([slopes[px, py] for px, py in zip(indx[:-1], indy[:-1])])
assert distance.shape == slope.shape, (distance.shape, slope.shape)
if len(indx) == 1:
px, py = indx[0], indy[0]
slope_scalar = float(slopes[px, py])
slope = np.array([slope_scalar, slope_scalar])
                # todo: head and tail do not appear to be used anywhere, but these
                # are inconsistent with the case where there is more than one pixel
head = enz_coords[-1][:-1]
tail = enz_coords[0][:-1]
direction = compute_direction(head, tail)
length = np.linalg.norm(np.array(head) - np.array(tail))
if length < cellsize:
length = cellsize
width = cellsize2 / length
distance_p = [0.0, 1.0]
elevs = representative_normalized_elevations(distance_p, list(slope))
else:
# need normalized distance_p to define slope
distance_p = cummnorm_distance(distance)
if len(slope) == 1:
slope = np.array([float(slope), float(slope)])
# calculate the length from the distance array
length = float(np.sum(distance) * cellsize)
width = float(cellsize)
# aspect = float(self._determine_aspect(indx, indy))
head = [v * cellsize for v in flowpath[-1]]
head = [float(v) for v in head]
tail = [v * cellsize for v in flowpath[0]]
tail = [float(v) for v in tail]
direction = compute_direction(head, tail)
elevs = representative_normalized_elevations(distance_p, list(slope))
slope_scalar = float(abs(elevs[-1]))
area = float(length) * float(width)
# calculate aspect
            aspect = np.mean(np.angle([complex(np.cos(rad), np.sin(rad)) for rad in fvslop[(indx, indy)]], deg=True))  # np.complex is a removed NumPy alias
isoutlet = dslinkn0 == -1
c_px, c_py = centroid_px(indx, indy)
centroid_lnglat = self.px_to_lnglat(c_px, c_py)
chn_enum = translator.chn_enum(chn_id=chn_id)
chn_d[str(chn_id)] = dict(chn_id=int(chn_id),
chn_enum=int(chn_enum),
order=int(order),
length=float(length),
width=float(width),
area=float(area),
elevs=[float(v) for v in elevs],
wepp_chn_type=wepp_chn_type,
head=head,
tail=tail,
aspect=float(aspect),
slopes=[float(v) for v in slope],
isoutlet=isoutlet,
direction=float(direction),
distance_p=[float(v) for v in distance_p],
centroid_px=[int(c_px), int(c_py)],
centroid_lnglat=[float(v) for v in centroid_lnglat],
slope_scalar=float(slope_scalar)
)
with open(self._abstracted_channels, 'w') as fp:
json.dump(chn_d, fp, indent=2, sort_keys=True)
@property
def topaz_sub_ids(self):
subwta = self.data_fetcher('subwta', dtype=np.uint16)
sub_ids = sorted(list(set(subwta.flatten())))
if 0 in sub_ids:
sub_ids.remove(0)
sub_ids = [v for v in sub_ids if not str(v).endswith('4')]
return sub_ids
@property
def topaz_chn_ids(self):
with open(self._net) as fp:
js = json.load(fp)
chn_ids = []
for feature in js['features']:
chn_ids.append(feature['properties']['TopazID'])
return chn_ids
@property
def translator(self):
return WeppTopTranslator(top_sub_ids=self.topaz_sub_ids, top_chn_ids=self.topaz_chn_ids)
def abstract_subcatchments(self, clip_hillslopes=False, clip_hillslope_length=300.0):
"""
in: dinf_dd_horizontal, dinf_dd_vertical, dinf_dd_surface, dinf_slope, subwta
:return:
"""
cellsize = self.cellsize
cellsize2 = self.cellsize2
sub_ids = self.topaz_sub_ids
assert _exists(self._dinf_dd_horizontal), self._dinf_dd_horizontal
assert _exists(self._dinf_dd_vertical), self._dinf_dd_vertical
assert _exists(self._dinf_dd_surface), self._dinf_dd_surface
assert _exists(self._dinf_slope), self._dinf_slope
assert _exists(self._subwta), self._subwta
assert _exists(self._dinf_angle), self._dinf_angle
subwta = self.data_fetcher('subwta', dtype=np.uint16)
        lengths = self.data_fetcher('dinf_dd_horizontal', dtype=np.float64)  # np.float is a removed NumPy alias
        verticals = self.data_fetcher('dinf_dd_vertical', dtype=np.float64)
        surface_lengths = self.data_fetcher('dinf_dd_surface', dtype=np.float64)
        slopes = self.data_fetcher('dinf_slope', dtype=np.float64)
        aspects = self.data_fetcher('dinf_angle', dtype=np.float64)
chns_d = self.abstracted_channels
subs_d = {}
for sub_id in sub_ids:
            # identify the corresponding channel
chn_id = str(sub_id)[:-1] + '4'
# identify indicies of sub_id
raw_indx, raw_indy = np.where(subwta == sub_id)
area = float(len(raw_indx)) * cellsize2
indx, indy = [], []
for _x, _y in zip(raw_indx, raw_indy):
if lengths[_x, _y] >= 0.0:
indx.append(_x)
indy.append(_y)
if len(indx) == 0:
print('sub_id', sub_id)
print('raw_indx, raw_indy', raw_indx, raw_indy)
print(lengths[(raw_indx, raw_indy)])
print(surface_lengths[(raw_indx, raw_indy)])
print(slopes[(raw_indx, raw_indy)])
print(aspects[(raw_indx, raw_indy)])
width = length = math.sqrt(area)
_slp = np.mean(slopes[(raw_indx, raw_indy)])
w_slopes = [_slp, _slp]
distance_p = [0, 1]
fp_longest = None
fp_longest_length = length
fp_longest_slope = _slp
else:
# extract flowpath statistics
fp_lengths = lengths[(indx, indy)]
fp_lengths += cellsize
fp_verticals = verticals[(indx, indy)]
fp_surface_lengths = surface_lengths[(indx, indy)]
fp_surface_lengths += cellsize
fp_surface_areas = np.ceil(fp_surface_lengths) * cellsize
fp_slopes = slopes[(indx, indy)]
length = float(np.sum(fp_lengths * fp_surface_areas) / np.sum(fp_surface_areas))
if clip_hillslopes and length > clip_hillslope_length:
length = clip_hillslope_length
width = area / length
# if str(sub_id).endswith('1'):
# # determine representative length and width
# # Cochrane dissertation eq 3.4
#
# #print('sub_id', sub_id)
# #pprint('fp_lengths')
# #pprint(fp_lengths)
# #pprint('fp_surface_areas')
# #pprint(fp_surface_areas)
# length = float(np.sum(fp_lengths * fp_surface_areas) / np.sum(fp_surface_areas))
# width = area / length
#
# #print('area', area)
# #print('width', width)
# #print('length', length, '\n\n\n')
# else:
# width = chns_d[chn_id].length
# length = area / width
# determine representative slope profile
w_slopes, distance_p = weighted_slope_average(fp_surface_areas, fp_slopes, fp_lengths)
# calculate longest flowpath statistics
fp_longest = int(np.argmax(fp_lengths))
fp_longest_vertical = fp_verticals[fp_longest]
fp_longest_length = fp_lengths[fp_longest]
fp_longest_slope = fp_longest_vertical / fp_longest_length
# calculate slope for hillslope
elevs = representative_normalized_elevations(distance_p, w_slopes)
slope_scalar = float(abs(elevs[-1]))
# calculate aspect
_aspects = aspects[(indx, indy)]
            aspect = np.mean(np.angle([complex(np.cos(rad), np.sin(rad)) for rad in _aspects], deg=True))  # np.complex is a removed NumPy alias
# calculate centroid
c_px, c_py = centroid_px(raw_indx, raw_indy)
centroid_lnglat = self.px_to_lnglat(c_px, c_py)
direction = chns_d[chn_id].direction
if str(sub_id).endswith('2'):
direction += 90
if str(sub_id).endswith('3'):
direction -= 90
subs_d[str(sub_id)] = dict(sub_id=int(sub_id),
area=float(area),
length=float(length),
aspect=float(aspect),
direction=float(direction),
width=float(width),
w_slopes=list(w_slopes),
distance_p=list(distance_p),
centroid_lnglat=[float(v) for v in centroid_lnglat],
centroid_px=[int(c_px), int(c_py)],
elevs=list(elevs),
slope_scalar=float(slope_scalar),
fp_longest=fp_longest,
fp_longest_length=float(fp_longest_length),
fp_longest_slope=float(fp_longest_slope)
)
with open(self._abstracted_subcatchments, 'w') as fp:
json.dump(subs_d, fp, indent=2, sort_keys=True)
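    # Editor's note (worked example, made-up numbers): the representative hillslope
    # length above is the surface-area-weighted mean flowpath length,
    # L = sum(l_i * a_i) / sum(a_i). For flowpath lengths l = [10, 20, 30] m with
    # contributing surface areas a = [100, 200, 100] m^2:
    # L = (10*100 + 20*200 + 30*100) / 400 = 20 m, and the width follows as area / L.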
def abstract_structure(self, verbose=False):
translator = self.translator
topaz_network = self.topaz_network
# now we are going to define the lines of the structure file
# this doesn't handle impoundments
structure = []
for chn_id in translator.iter_chn_ids():
if verbose:
print('abstracting structure for channel %s...' % chn_id)
top = translator.top(chn_id=chn_id)
chn_enum = translator.chn_enum(chn_id=chn_id)
# right subcatchments end in 2
hright = top - 2
if not translator.has_top(hright):
hright = 0
# left subcatchments end in 3
hleft = top - 1
if not translator.has_top(hleft):
hleft = 0
# center subcatchments end in 1
hcenter = top - 3
if not translator.has_top(hcenter):
hcenter = 0
# define structure for channel
# the first item defines the channel
_structure = [chn_enum]
# network is defined from the NETW.TAB file that has
# already been read into {network}
# the 0s are appended to make sure it has a length of
# at least 3
chns = topaz_network[top] + [0, 0, 0]
# structure line with top ids
_structure += [hright, hleft, hcenter] + chns[:3]
# this is where we would handle impoundments
# for now no impoundments are assumed
_structure += [0, 0, 0]
# and translate topaz to wepp
structure.append([int(v) for v in _structure])
with open(self._structure, 'w') as fp:
for row in structure:
fp.write('\t'.join([str(v) for v in row]))
                fp.write('\n')
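    # Editor's note: illustrative (made-up) structure.tsv line for one channel.
    # For Topaz channel 24 with chn_enum 2, right/left/center hillslopes 22/23/21
    # and two upstream channels 34 and 44, the written row would be:
    #
    #     2   22  23  21  34  44  0   0   0   0
    #
    # (the last three zeros are the unused impoundment slots; the zero before them
    # is the empty third upstream-channel slot).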
def delineate_subcatchments(self, use_topaz_ids=True):
"""
in: pksrc, net
out: subwta
:return:
"""
w_data = self.data_fetcher('w', dtype=np.int32)
_src_data = self.data_fetcher('pksrc', dtype=np.int32)
src_data = np.zeros(_src_data.shape, dtype=np.int32)
src_data[np.where(_src_data == 1)] = 1
subwta = np.zeros(w_data.shape, dtype=np.uint16)
with open(self._net) as fp:
js = json.load(fp)
# identify pourpoints of the end node catchments
end_node_pourpoints = {}
for feature in js['features']:
catchment_id = feature['properties']['WSNO']
coords = feature['geometry']['coordinates']
uslinkn01 = feature['properties']['USLINKNO1']
uslinkn02 = feature['properties']['USLINKNO2']
end_node = uslinkn01 == -1 and uslinkn02 == -1
top = coords[-1][:-1]
if end_node:
end_node_pourpoints[catchment_id] = top
# make geojson with pourpoints as input for gage watershed
outlets_fn = _join(self.wd, 'outlets.geojson')
self._make_multiple_outlets_geojson(dst=outlets_fn, en_points_dict=end_node_pourpoints)
gw_fn = _join(self.wd, 'end_nodes_gw.tif')
self._run_gagewatershed(outlets_fn=outlets_fn, dst=gw_fn)
gw, _, _ = read_tif(gw_fn, dtype=np.int16)
for _pass in range(2):
for feature in js['features']:
topaz_id = int(str(feature['properties']['TopazID'])[:-1])
catchment_id = feature['properties']['WSNO']
coords = feature['geometry']['coordinates']
uslinkn01 = feature['properties']['USLINKNO1']
uslinkn02 = feature['properties']['USLINKNO2']
end_node = uslinkn01 == -1 and uslinkn02 == -1
if (end_node and _pass) or (not end_node and not _pass):
continue # this has already been processed
top = coords[-1]
bottom = coords[0]
top_px = self.utm_to_px(top[0], top[1])
bottom_px = self.utm_to_px(bottom[0], bottom[1])
# need a mask for the side subcatchments
catchment_data = np.zeros(w_data.shape, dtype=np.int32)
catchment_data[np.where(w_data == catchment_id)] = 1
if end_node:
                    # restrict the end node catchment to the catchment area;
# otherwise there are cases where it gets drainage from beyond the watershed
gw_sub = gw * catchment_data
# identify top subcatchment cells
gw_indx = np.where(gw_sub == catchment_id)
# copy the top subcatchment to the subwta raster
if use_topaz_ids:
subwta[gw_indx] = int(str(topaz_id) + '1')
else:
subwta[gw_indx] = int(str(catchment_id) + '1')
# remove end subcatchments from the catchment mask
catchment_data[np.where(subwta != 0)] = 0
# remove channels from catchment mask
catchment_data -= src_data
catchment_data = np.clip(catchment_data, a_min=0, a_max=1)
indx, indy = np.where(catchment_data == 1)
print(catchment_id, _pass, len(indx))
# the whole catchment drains through the top of the channel
if len(indx) == 0:
continue
if _DEBUG:
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(_join(self.wd, 'catchment_for_label_%05i.tif' % catchment_id),
xsize=subwta.shape[0], ysize=subwta.shape[1],
bands=1, eType=gdal.GDT_Int32,
options=['COMPRESS=LZW', 'PREDICTOR=2'])
dst_ds.SetGeoTransform(self.transform)
dst_ds.SetProjection(self.srs_wkt)
band = dst_ds.GetRasterBand(1)
band.WriteArray(catchment_data.T)
dst_ds = None
# we are going to crop the catchment for scipy.ndimage.label. It is really slow otherwise
# to do this we identify the bounds and then add a pad
pad = 1
x0, xend = np.min(indx), np.max(indx)
if x0 >= pad:
x0 -= pad
else:
x0 = 0
if xend < self.num_cols - pad:
xend += pad
else:
xend = self.num_cols - 1
y0, yend = np.min(indy), np.max(indy)
if y0 >= pad:
y0 -= pad
else:
y0 = 0
if yend < self.num_rows - pad:
yend += pad
else:
yend = self.num_rows - 1
# crop to just the side channel catchments
_catchment_data = catchment_data[x0:xend, y0:yend]
# use scipy.ndimage.label to identify side subcatchments
# todo: compare performance to opencv connectedComponents
# https://stackoverflow.com/questions/46441893/connected-component-labeling-in-python
subcatchment_data, n_labels = label(_catchment_data)
# isolated pixels in the channel can get misidentified as subcatchments
# this gets rid of those
subcatchment_data -= src_data[x0:xend, y0:yend]
# we only want the two largest subcatchments. These should be the side subcatchments
# so we need to identify which are the largest
sub_d = []
for i in range(n_labels):
s_indx, s_indy = np.where(subcatchment_data == i + 1)
sub_d.append(dict(rank=len(s_indx), s_indx=s_indx, s_indy=s_indy,
point=(x0 + np.mean(s_indx), y0 + np.mean(s_indy)),
origin=(float(bottom_px[0]), float(bottom_px[1])),
refvec=np.array(top_px, dtype=float) - np.array(bottom_px, dtype=float)
)
)
# sort clockwise
sub_d = sorted(sub_d, key=lambda _d: _d['rank'], reverse=True)
if len(sub_d) > 2:
sub_d = sub_d[:2]
sub_d = sorted(sub_d, key=lambda _d: rect_to_polar(_d))
# assert len(sub_d) == 2
k = 2
for d in sub_d:
if use_topaz_ids:
subwta[x0:xend, y0:yend][d['s_indx'], d['s_indy']] = int(str(topaz_id) + str(k))
else:
subwta[x0:xend, y0:yend][d['s_indx'], d['s_indy']] = int(str(catchment_id) + str(k))
k += 1
channels = self.data_fetcher('channels', dtype=np.int32)
ind = np.where(subwta == 0)
subwta[ind] = channels[ind]
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(self._subwta, xsize=subwta.shape[0], ysize=subwta.shape[1],
bands=1, eType=gdal.GDT_UInt16, options=['COMPRESS=LZW', 'PREDICTOR=2'])
dst_ds.SetGeoTransform(self.transform)
dst_ds.SetProjection(self.srs_wkt)
band = dst_ds.GetRasterBand(1)
band.WriteArray(subwta.T)
band.SetNoDataValue(0)
dst_ds = None
def make_bound(self):
w_data = self.data_fetcher('w', dtype=np.int32)
bound = np.zeros(w_data.shape, dtype=np.int32)
bound[np.where(w_data > 0)] = 1
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(self._bound, xsize=bound.shape[0], ysize=bound.shape[1],
bands=1, eType=gdal.GDT_Byte, options=['COMPRESS=LZW', 'PREDICTOR=2'])
dst_ds.SetGeoTransform(self.transform)
dst_ds.SetProjection(self.srs_wkt)
band = dst_ds.GetRasterBand(1)
band.WriteArray(bound.T)
band.SetNoDataValue(0)
dst_ds = None
def calculate_watershed_statistics(self):
bound = self.data_fetcher('bound', dtype=np.int32)
fvslop = self.data_fetcher('dinf_angle', dtype=np.float32)
relief = self.data_fetcher('fel', dtype=np.float32)
# calculate descriptive statistics
cellsize = self.cellsize
wsarea = float(np.sum(bound) * cellsize * cellsize)
mask = -1 * bound + 1
# determine area with slope > 30
fvslop_ma = np.ma.masked_array(fvslop, mask=mask)
indx, indy = np.ma.where(fvslop_ma > 0.3)
area_gt30 = float(len(indx) * cellsize * cellsize)
# determine ruggedness of watershed
relief_ma = np.ma.masked_array(relief, mask=mask)
minz = float(np.min(relief_ma))
maxz = float(np.max(relief_ma))
ruggedness = float((maxz - minz) / math.sqrt(wsarea))
indx, indy = np.ma.where(bound == 1)
ws_cen_px, ws_cen_py = int(np.round(np.mean(indx))), int(np.round(np.mean(indy)))
ws_centroid = self.px_to_lnglat(ws_cen_px, ws_cen_py)
outlet_top_id = None # todo
return dict(wsarea=wsarea,
area_gt30=area_gt30,
ruggedness=ruggedness,
minz=minz,
maxz=maxz,
ws_centroid=ws_centroid,
outlet_top_id=outlet_top_id,)
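    # Editor's note (worked example, made-up numbers): for a watershed area of
    # 1e6 m^2 with minz = 1200 m and maxz = 1500 m, the ruggedness computed above
    # is (1500 - 1200) / sqrt(1e6) = 0.3.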
@property
def topaz_network(self):
tau2top = self.tau2topaz_translator_factory()
network = self.network
top_network = {}
for tau_id, d in network.items():
topaz_id = int(str(tau2top[tau_id]) + '4')
links = [int(str(tau2top[_tau_id]) + '4') for _tau_id in d['links']]
top_network[topaz_id] = links
return top_network
def tau2topaz_translator_factory(self):
tree = Node(self.outlet_tau_id, self.network)
def preorder_traverse(node):
res = []
if node:
res.append(node.data)
res.extend(preorder_traverse(node.left))
res.extend(preorder_traverse(node.right))
return res
tau_ids = preorder_traverse(tree)
if _DEBUG:
print('network', tau_ids)
d = {tau_id: i+2 for i, tau_id in enumerate(tau_ids)}
return d
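    # Editor's note: small illustration (made-up network) of the mapping built above.
    # If the outlet link 0 has upstream links 1 and 2, a preorder traversal gives
    # [0, 1, 2]; the enumerate(...) offset of 2 then yields {0: 2, 1: 3, 2: 4}, so
    # TauDEM link 0 maps to Topaz catchment 2 (channel TopazID 24), link 1 to
    # catchment 3 (TopazID 34), and so on.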
def write_slps(self, out_dir, channels=1, subcatchments=1, flowpaths=0):
"""
Writes slope files to the specified wat_dir. The channels,
subcatchments, and flowpaths args specify what slope files
should be written.
"""
if channels:
self._make_channel_slps(out_dir)
if subcatchments:
self._write_subcatchment_slps(out_dir)
if flowpaths:
raise NotImplementedError
def _make_channel_slps(self, out_dir):
channels = self.abstracted_channels
translator = self.translator
chn_ids = channels.keys()
chn_enums = sorted([translator.chn_enum(chn_id=v) for v in chn_ids])
# watershed run requires a slope file defining all of the channels in the
# 99.1 format. Here we write a combined channel slope file and a slope
# file for each individual channel
fp2 = open(_join(out_dir, 'channels.slp'), 'w')
fp2.write('99.1\n')
fp2.write('%i\n' % len(chn_enums))
for chn_enum in chn_enums:
top = translator.top(chn_enum=chn_enum)
chn_id = str(top)
d = channels[chn_id]
_chn_wepp_width = d.chn_wepp_width
write_slp(d.aspect, d.width, _chn_wepp_width, d.length,
d.slopes, d.distance_p, fp2, 99.1)
fp2.close()
def _write_subcatchment_slps(self, out_dir):
subcatchments = self.abstracted_subcatchments
cellsize = self.cellsize
for sub_id, d in subcatchments.items():
slp_fn = _join(out_dir, 'hill_%s.slp' % sub_id)
fp = open(slp_fn, 'w')
write_slp(d.aspect, d.width, cellsize, d.length,
d.w_slopes, d.distance_p, fp, 97.3)
fp.close()
avg_line_length: 37.619612 | max_line_length: 120 | alphanum_fraction: 0.540661 | count_classes: 34,098 | score_classes: 0.976712 | count_generators: 0 | score_generators: 0 | count_decorators: 5,046 | score_decorators: 0.144539 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 5,517 | score_documentation: 0.15803
Row 5 | hexsha: 733a0eff21e557f8f32c9d92815d4f668db0c2d8 | size: 47,930 | ext: py | lang: Python
path: PyRSM/utils.py | repo: chdahlqvist/RSMmap | head_hexsha: 53984967d612eaf4feb90ba4972109638f6cf70a | licenses: ["MIT"]
stars: 3 (2021-05-18T16:40:13.000Z to 2022-03-17T15:32:31.000Z) | issues: null | forks: 1 (2022-01-19T11:04:21.000Z to 2022-01-19T11:04:21.000Z)

"""
Set of functions used by the PyRSM class to compute detection maps and optimize the parameters
of the RSM algorithm and PSF-subtraction techniques via the auto-RSM and auto-S/N frameworks
"""
__author__ = 'Carl-Henrik Dahlqvist'
from scipy.interpolate import Rbf
import pandas as pd
import numpy.linalg as la
from vip_hci.var import get_annulus_segments, frame_center,prepare_matrix
from vip_hci.preproc.derotation import _define_annuli
import numpy as np
from vip_hci.preproc import cube_derotate, cube_collapse, check_pa_vector,check_scal_vector
from vip_hci.preproc.derotation import _find_indices_adi
from vip_hci.preproc.rescaling import _find_indices_sdi
import scipy as sp
from multiprocessing import cpu_count
from vip_hci.conf.utils_conf import pool_map, iterable
from vip_hci.pca.svd import get_eigenvectors
from vip_hci.llsg.llsg import _patch_rlrps
from vip_hci.preproc import cube_rescaling_wavelengths as scwave
import vip_hci as vip
from sklearn.decomposition import NMF as NMF_sklearn
# NOTE (editor): NMF_patch/NMF_patch_range below use a bare `NMF` class exposing a
# SolveNMF() method and .W/.H attributes, which is not sklearn's NMF; in PyRSM this
# is presumably the NonnegMFPy implementation. The import below is an assumption,
# as it does not appear in this excerpt:
from NonnegMFPy.nmf import NMF
def check_delta_sep(scale_list,delta_sep,minradius,fwhm,c):
wl = np.asarray(scale_list)
wl_ref = wl[len(wl)//2]
sep_lft = (wl_ref - wl) / wl_ref * ((minradius + fwhm * delta_sep) / fwhm)
sep_rgt = (wl - wl_ref) / wl_ref * ((minradius - fwhm * delta_sep) / fwhm)
map_lft = sep_lft >= delta_sep
map_rgt = sep_rgt >= delta_sep
indices = np.nonzero(map_lft | map_rgt)[0]
if indices.size == 0:
raise RuntimeError(("No frames left after radial motion threshold for cube {}. Try "
"decreasing the value of `delta_sep`").format(c))
def rot_scale(step,cube,cube_scaled,angle_list,scale_list, imlib, interpolation):
"""
    Function used to rescale the frames when relying on ADI+SDI before the computation of the reference PSF
    (step='ini'), and to rescale and derotate the frames to generate the cube of residuals used by the RSM
    algorithm (step='fin').
Parameters
----------
step: str
'ini' before the reference PSF computation and 'fin' after PSF subtraction.
cube: numpy ndarray, 3d or 4d
Original cube
cube_scaled: numpy ndarray, 3d
Cube of residuals to be rescaled and derotated (None for the step='ini')
angle_list : numpy ndarray, 1d
Parallactic angles for each frame of the ADI sequences.
scale_list: numpy ndarray, 1d, optional
Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the
scaling factors are the central channel wavelength divided by the
shortest wavelength in the cube (more thorough approaches can be used
to get the scaling factors). This scaling factors are used to re-scale
the spectral channels and align the speckles. Default is None
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
"""
if cube.ndim == 4:
z, n, y_in, x_in = cube.shape
scale_list = check_scal_vector(scale_list)
if step=='ini':
# rescaled cube, aligning speckles for SDI
for i in range(n):
if i==0:
fin_cube = scwave(cube[:, i, :, :], scale_list,
imlib=imlib, interpolation=interpolation)[0]
fin_pa=np.repeat(angle_list[i],z)
fin_scale=scale_list
else:
fin_cube = np.append(fin_cube,scwave(cube[:, i, :, :], scale_list,
imlib=imlib, interpolation=interpolation)[0],axis=0)
fin_pa=np.append(fin_pa,np.repeat(angle_list[i],z),axis=0)
fin_scale=np.append(fin_scale,scale_list,axis=0)
return fin_cube,fin_pa,fin_scale
elif step=='fin':
cube_fin=np.zeros((n,y_in, x_in))
cube_rescaled = scwave(cube_scaled, scale_list,
full_output=True, inverse=True,
y_in=y_in, x_in=x_in, imlib=imlib,
interpolation=interpolation)[0]
cube_derotated=cube_derotate(cube_rescaled,angle_list, interpolation=interpolation,imlib=imlib)
for i in range(n):
cube_fin[i]=np.mean(cube_derotated[(i*z):((i+1)*z),:,:],axis=0)
return cube_fin
if cube.ndim == 3:
if step=='ini':
return cube,angle_list,None
elif step=='fin':
cube_derotated=cube_derotate(cube_scaled,angle_list, interpolation=interpolation,imlib=imlib)
return cube_derotated
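# Editor's note: minimal usage sketch for rot_scale() (not part of the original
# PyRSM code; shapes and parameter values are illustrative). For an IFS cube of
# shape (n_channels, n_frames, ny, nx):
#
#     cube_sc, pa_sc, sc = rot_scale('ini', cube, None, angle_list, scale_list,
#                                    'opencv', 'lanczos4')
#     # ... subtract a reference PSF from cube_sc to get cube_res ...
#     residuals = rot_scale('fin', cube, cube_res, pa_sc, sc, 'opencv', 'lanczos4')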
def remove_outliers(time_s, range_sel, k=5, t0=3):
"""
Hampel Filter to remove potential outliers in the set of selected parameters
for the annular mode of the auto-RSM framework
"""
vals=pd.DataFrame(data=time_s[range_sel])
L= 1.4826
rolling_median=vals.rolling(k).median()
difference=np.abs(rolling_median-vals)
median_abs_deviation=difference.rolling(k).median()
threshold= t0 *L * median_abs_deviation
outlier_idx=difference>threshold
vals[outlier_idx]=threshold[outlier_idx]
return(vals.to_numpy().reshape(-1))
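# Editor's note: illustrative sketch of the Hampel filter above (made-up series).
# Values deviating from the k-point rolling median by more than
# t0 * 1.4826 * MAD are clipped to that threshold:
#
#     series = np.array([1., 1., 1., 9., 1., 1., 1., 1.])
#     smoothed = remove_outliers(series, np.arange(len(series)), k=3, t0=3)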
def interpolation(time_s,range_sel):
"""
Interpolation algorithm for the RSM parameters
for the annular mode of the auto-RSM framework
"""
time_series=time_s.copy()
time_series[range_sel]=remove_outliers(time_series,range_sel)
fit = Rbf(range_sel,time_s[range_sel])
inter_point = np.linspace(range_sel[0],range_sel[-1]+1, num=(range_sel[-1]-range_sel[0]+1), endpoint=True)
return fit(inter_point)
def poly_fit(time_s,range_sel,poly_n):
"""
Smoothing procedure for the computation of the final radial thresholds
which are subtracted from the final RSM detection map in the final step
of the auto-RSM framework
"""
time_series=time_s.copy()
time_series[range_sel]=remove_outliers(time_series,range_sel)
fit_p=np.poly1d(np.polyfit(range_sel,time_series[range_sel], poly_n))
time_series=fit_p(range(len(time_series)))
return time_series
def get_time_series(mcube,ann_center):
"""
Function defining and ordering (anti-clockwise) the pixels composing
an annulus at a radial distance of ann_center for an ADI sequence mcube
"""
if mcube.ndim == 4:
indices = get_annulus_segments(mcube[0,0,:,:], ann_center,1,4,90)
else:
indices = get_annulus_segments(mcube[0], ann_center,1,4,90)
tempind=np.vstack((indices[0][0],indices[0][1]))
ind = np.lexsort((tempind[0], tempind[1]))
indicesy=tempind[0,ind[::-1]]
indicesx=tempind[1,ind[::-1]]
tempind=np.vstack((indices[1][0],indices[1][1]))
ind = np.lexsort((-tempind[0], tempind[1]))
indicesy=np.hstack((indicesy,tempind[0,ind[::-1]]))
indicesx=np.hstack((indicesx,tempind[1,ind[::-1]]))
tempind=np.vstack((indices[2][0],indices[2][1]))
ind = np.lexsort((tempind[0], tempind[1]))
indicesy=np.hstack((indicesy,tempind[0,ind]))
indicesx=np.hstack((indicesx,tempind[1,ind]))
tempind=np.vstack((indices[3][0],indices[3][1]))
ind = np.lexsort((-tempind[0], tempind[1]))
indicesy=np.hstack((indicesy,tempind[0,ind]))
indicesx=np.hstack((indicesx,tempind[1,ind]))
return indicesy,indicesx
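# Editor's note: usage sketch (not from the original code). For a 3-D ADI cube,
# get_time_series() returns the y/x indices of a one-pixel-wide annulus at radius
# ann_center, ordered anti-clockwise, so the annulus can be read as a "time series":
#
#     indy, indx = get_time_series(cube, ann_center=12)
#     annulus_values = cube[:, indy, indx]   # shape (n_frames, n_annulus_pixels)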
def perturb(frame,model_matrix,numbasis,evals_matrix, evecs_matrix, KL_basis_matrix,sci_mean_sub_matrix,refs_mean_sub_matrix, angle_list, fwhm, pa_threshold, ann_center):
"""
Function allowing the estimation of the PSF forward model when relying on KLIP
for the computation of the speckle field. The code is based on the PyKLIP library
    considering only the ADI case with a single number of principal components.
    For more details about the code, consider the PyKLIP library or the original articles
(Pueyo, L. 2016, ApJ, 824, 117 or
Ruffio, J.-B., Macintosh, B., Wang, J. J., & Pueyo, L. 2017, ApJ, 842)
"""
    # Selection of the reference library based on the given parallactic angle threshold
if pa_threshold != 0:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
models_ref = model_matrix[indices_left]
else:
models_ref = model_matrix
#Computation of the self-subtraction and over-subtraction for the current frame
model_sci = model_matrix[frame]
KL_basis=KL_basis_matrix[frame]
sci_mean_sub=sci_mean_sub_matrix[frame]
refs_mean_sub=refs_mean_sub_matrix[frame]
evals=evals_matrix[frame]
evecs=evecs_matrix[frame]
max_basis = KL_basis.shape[0]
N_pix = KL_basis.shape[1]
models_mean_sub = models_ref - np.nanmean(models_ref, axis=1)[:,None]
models_mean_sub[np.where(np.isnan(models_mean_sub))] = 0
model_sci_mean_sub = model_sci- np.nanmean(model_sci)
model_sci_mean_sub[np.where(np.isnan(model_sci_mean_sub))] = 0
model_sci_mean_sub_rows = np.reshape(model_sci_mean_sub,(1,N_pix))
sci_mean_sub_rows = np.reshape(sci_mean_sub,(1,N_pix))
delta_KL = np.zeros([max_basis, N_pix])
models_mean_sub_X_refs_mean_sub_T = models_mean_sub.dot(refs_mean_sub.transpose())
for k in range(max_basis):
Zk = np.reshape(KL_basis[k,:],(1,KL_basis[k,:].size))
Vk = (evecs[:,k])[:,None]
diagVk_X_models_mean_sub_X_refs_mean_sub_T = (Vk.T).dot(models_mean_sub_X_refs_mean_sub_T)
models_mean_sub_X_refs_mean_sub_T_X_Vk = models_mean_sub_X_refs_mean_sub_T.dot(Vk)
DeltaZk = -(1/(2*np.sqrt(evals[k])))*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vk) + ((Vk.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zk)+(Vk.T).dot(models_mean_sub)
for j in range(k):
Zj = KL_basis[j, :][None,:]
Vj = evecs[:, j][:,None]
DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj)
for j in range(k+1, max_basis):
Zj = KL_basis[j, :][None,:]
Vj = evecs[:, j][:,None]
DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj)
delta_KL[k] = DeltaZk/np.sqrt(evals[k])
oversubtraction_inner_products = np.dot(model_sci_mean_sub_rows, KL_basis.T)
selfsubtraction_1_inner_products = np.dot(sci_mean_sub_rows, delta_KL.T)
selfsubtraction_2_inner_products = np.dot(sci_mean_sub_rows, KL_basis.T)
oversubtraction_inner_products[max_basis::] = 0
klipped_oversub = np.dot(oversubtraction_inner_products, KL_basis)
selfsubtraction_1_inner_products[0,max_basis::] = 0
selfsubtraction_2_inner_products[0,max_basis::] = 0
klipped_selfsub = np.dot(selfsubtraction_1_inner_products, KL_basis) + \
np.dot(selfsubtraction_2_inner_products, delta_KL)
return model_sci[None,:] - klipped_oversub - klipped_selfsub
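# Editor's note: illustrative wiring (not part of the original module) between
# KLIP_patch() and perturb(). The per-frame outputs of KLIP_patch() populate the
# *_matrix inputs of perturb(), which then returns the forward-modelled
# (over- and self-subtraction corrected) planet PSF for that frame:
#
#     evals_k, evecs_k, KL_k, res_k, refs_ms_k, sci_ms_k = KLIP_patch(
#         k, matrix, ncomp, angle_list, fwhm, pa_thr, ann_center)
#     # after storing these for every frame k:
#     fm_k = perturb(k, model_matrix, ncomp, evals_matrix, evecs_matrix,
#                    KL_basis_matrix, sci_mean_sub_matrix, refs_mean_sub_matrix,
#                    angle_list, fwhm, pa_thr, ann_center)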
def KLIP(cube, angle_list, nann=None, local=False, fwhm=4, asize=2, n_segments=1,delta_rot=1, ncomp=1,min_frames_lib=2, max_frames_lib=200,imlib='opencv',nframes=None, interpolation='lanczos4', collapse='median',full_output=False, verbose=1):
"""
Function allowing the estimation of the cube of residuals after
the subtraction of the speckle field modeled via the KLIP framework
"""
array = cube
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array')
if array.shape[0] != angle_list.shape[0]:
raise TypeError('Input vector or parallactic angles has wrong length')
n, y, _ = array.shape
angle_list = check_pa_vector(angle_list)
if asize is None:
annulus_width = int(np.ceil(2 * fwhm))
elif isinstance(asize, int):
annulus_width = asize
# Annulus parametrization
radius_int=fwhm
if local==True:
if nann> 2*annulus_width:
n_annuli = 5
radius_int=(nann//annulus_width-2)*annulus_width
else:
n_annuli = 4
radius_int=(nann//annulus_width-1)*annulus_width
else:
n_annuli = int((y / 2 - radius_int) / asize)
    # Definition of the number of segments for the different annuli
if isinstance(n_segments, int):
n_segments = [n_segments for _ in range(n_annuli)]
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2)
n_segments.append(3)
ld = 2 * np.tan(360 / 4 / 2) * asize
for i in range(2, n_annuli):
radius = i * asize
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360 / ang)))
if verbose:
msg = '# annuli = {}, Ann width = {}, FWHM = {:.3f}'
print(msg.format(n_annuli, asize, fwhm))
print('PCA per annulus (or annular sectors):')
    # Definition of the annuli and the corresponding parallactic angle threshold
cube_out = np.zeros_like(array)
for ann in range(n_annuli):
if isinstance(ncomp, list) or isinstance(ncomp, np.ndarray):
if len(ncomp) == n_annuli:
ncompann = ncomp[ann]
else:
msge = 'If ncomp is a list, it must match the number of annuli'
raise TypeError(msge)
else:
ncompann = ncomp
inner_radius = radius_int + ann * annulus_width
n_segments_ann = n_segments[ann]
if verbose:
print('{} : in_rad={}, n_segm={}'.format(ann+1, inner_radius,
n_segments_ann))
theta_init = 90
res_ann_par = _define_annuli(angle_list, ann, int((y / 2 - radius_int) / asize), fwhm,radius_int, annulus_width, delta_rot,n_segments_ann, verbose)
pa_thr, inner_radius, ann_center = res_ann_par
indices = get_annulus_segments(array[0], inner_radius, annulus_width,n_segments_ann,theta_init)
# Computation of the speckle field for the different frames and estimation of the cube of residuals
for j in range(n_segments_ann):
for k in range(array.shape[0]):
res =KLIP_patch(k,array[:, indices[j][0], indices[j][1]], ncompann, angle_list, fwhm, pa_thr, ann_center,nframes=nframes)
cube_out[k,indices[j][0], indices[j][1]] = res[3]
# Cube is derotated according to the parallactic angle and collapsed
cube_der = cube_derotate(cube_out, angle_list, imlib=imlib,interpolation=interpolation)
frame = cube_collapse(cube_der, mode=collapse)
if full_output:
return cube_out, cube_der, frame
else:
return frame
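# Editor's note: minimal usage sketch for KLIP() (parameter values are
# illustrative, not from the original code):
#
#     frame = KLIP(cube, angle_list, fwhm=4, asize=2, delta_rot=0.5, ncomp=15,
#                  verbose=0)
#
# With full_output=True, the residual cubes before and after derotation are
# returned along with the collapsed frame.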
def KLIP_patch(frame, matrix, numbasis, angle_list, fwhm, pa_threshold, ann_center,nframes=None):
"""
Function allowing the computation via KLIP of the speckle field for a
    given sub-region of the original ADI sequence. Code inspired by the PyKLIP library
"""
max_frames_lib=200
if pa_threshold != 0:
if ann_center > fwhm*20:
indices_left = _find_indices_adi(angle_list,frame,pa_threshold, truncate=True,max_frames=max_frames_lib)
else:
indices_left = _find_indices_adi(angle_list, frame,pa_threshold, truncate=False,nframes=nframes)
refs = matrix[indices_left]
else:
refs = matrix
sci = matrix[frame]
sci_mean_sub = sci - np.nanmean(sci)
#sci_mean_sub[np.where(np.isnan(sci_mean_sub))] = 0
refs_mean_sub = refs- np.nanmean(refs, axis=1)[:, None]
#refs_mean_sub[np.where(np.isnan(refs_mean_sub))] = 0
# Covariance matrix definition
covar_psfs = np.cov(refs_mean_sub)
covar_psfs *= (np.size(sci)-1)
tot_basis = covar_psfs.shape[0]
numbasis = np.clip(numbasis - 1, 0, tot_basis-1)
max_basis = np.max(numbasis) + 1
#Computation of the eigenvectors/values of the covariance matrix
evals, evecs = la.eigh(covar_psfs)
evals = np.copy(evals[int(tot_basis-max_basis):int(tot_basis)])
evecs = np.copy(evecs[:,int(tot_basis-max_basis):int(tot_basis)])
evals = np.copy(evals[::-1])
evecs = np.copy(evecs[:,::-1])
# Computation of the principal components
KL_basis = np.dot(refs_mean_sub.T,evecs)
KL_basis = KL_basis * (1. / np.sqrt(evals))[None,:]
KL_basis = KL_basis.T
N_pix = np.size(sci_mean_sub)
sci_rows = np.reshape(sci_mean_sub, (1,N_pix))
inner_products = np.dot(sci_rows, KL_basis.T)
inner_products[0,int(max_basis)::]=0
    # Projection of the science image on the selected principal components
    # to generate the speckle field model
klip_reconstruction = np.dot(inner_products, KL_basis)
    # Subtraction of the speckle field model from the original science image
    # to obtain the residual frame
sub_img_rows = sci_rows - klip_reconstruction
return evals,evecs,KL_basis,np.reshape(sub_img_rows, (N_pix)),refs_mean_sub,sci_mean_sub
def LOCI_FM(cube, psf, ann_center, angle_list,scale_list, asize,fwhm, Tol,delta_rot,delta_sep):
"""
    Computation of the optimal factors weighting the linear combination of reference
frames used to obtain the modeled speckle field for each frame and allowing the
determination of the forward modeled PSF. Estimation of the cube
of residuals based on the modeled speckle field.
"""
cube_res = np.zeros_like(cube)
ceny, cenx = frame_center(cube[0])
radius_int=ann_center-int(1.5*asize)
if radius_int<=0:
radius_int=1
for ann in range(3):
n_segments_ann = 1
inner_radius_ann = radius_int + ann*asize
pa_threshold = _define_annuli(angle_list, ann, 3, asize,
radius_int, asize, delta_rot,
n_segments_ann, verbose=False)[0]
indices = get_annulus_segments(cube[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann)
ind_opt = get_annulus_segments(cube[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann,
optim_scale_fact=2)
ayxyx = [inner_radius_ann,pa_threshold, indices[0][0], indices[0][1],
ind_opt[0][0], ind_opt[0][1]]
matrix_res, ind_ref, coef, yy, xx = _leastsq_patch(ayxyx,
angle_list,scale_list,fwhm,cube,ann_center,'manhattan', 100,delta_sep,
'lstsq', Tol,formod=True,psf=psf)
if ann==1:
ind_ref_list=ind_ref
coef_list=coef
cube_res[:, yy, xx] = matrix_res
return cube_res, ind_ref_list,coef_list
def nmf_adisdi(cube, angle_list,scale_list=None, cube_ref=None, ncomp=1, scaling=None, max_iter=100,
random_state=None, mask_center_px=None, imlib='opencv',
interpolation='lanczos4', collapse='median', full_output=False,
verbose=True, **kwargs):
""" Non Negative Matrix Factorization for ADI or ADI+SDI sequences.This function embeds the
scikit-learn NMF algorithm solved through coordinate descent method.
"""
array,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
n, y, x = array.shape
matrix = prepare_matrix(array, scaling, mask_center_px, mode='fullfr',
verbose=verbose)
matrix += np.abs(matrix.min())
if cube_ref is not None:
matrix_ref = prepare_matrix(cube_ref, scaling, mask_center_px,
mode='fullfr', verbose=verbose)
matrix_ref += np.abs(matrix_ref.min())
mod = NMF_sklearn(n_components=ncomp, alpha=0, solver='cd', init='nndsvd',
max_iter=max_iter, random_state=random_state, **kwargs)
# H [ncomp, n_pixels]: Non-negative components of the data
if cube_ref is not None:
H = mod.fit(matrix_ref).components_
else:
H = mod.fit(matrix).components_
# W: coefficients [n_frames, ncomp]
W = mod.transform(matrix)
reconstructed = np.dot(W, H)
residuals = matrix - reconstructed
array_out = np.zeros_like(array)
for i in range(n):
array_out[i] = residuals[i].reshape(y,x)
cube_der=rot_scale('fin',cube,array_out,angle_list_t,scale_list_t, imlib, interpolation)
frame_fin = cube_collapse(cube_der, mode=collapse)
return cube_der,frame_fin
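# Editor's note: usage sketch for nmf_adisdi() (made-up parameter values).
# For a plain ADI cube, scale_list is left as None:
#
#     cube_res, frame = nmf_adisdi(cube, angle_list, ncomp=8, max_iter=100,
#                                  verbose=False)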
def annular_NMF(cube, angle_list, nann=None, local=False, fwhm=4, asize=2, n_segments=1, ncomp=20,imlib='opencv', interpolation='lanczos4', collapse='median',max_iter=100,
random_state=None,full_output=False, verbose=False):
"""
Function allowing the estimation of the cube of residuals after
the subtraction of the speckle field modeled via the NMF framework.
    This code is an adaptation of the VIP NMF function to the case of annular
computation of the modeled speckle fields
(only full-frame estimation in Gonzalez et al. AJ, 154:7,2017)
"""
array = cube
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array')
if array.shape[0] != angle_list.shape[0]:
raise TypeError('Input vector or parallactic angles has wrong length')
n, y, _ = array.shape
angle_list = check_pa_vector(angle_list)
if asize is None:
annulus_width = int(np.ceil(2 * fwhm))
elif isinstance(asize, int):
annulus_width = asize
# Annulus parametrization
radius_int=fwhm
if local==True:
if nann> 2*annulus_width:
n_annuli = 5
radius_int=(nann//annulus_width-2)*annulus_width
else:
n_annuli = 4
radius_int=(nann//annulus_width-1)*annulus_width
else:
n_annuli = int((y / 2 - radius_int) / asize)
    # Definition of the annuli and the corresponding parallactic angle threshold
cube_out = np.zeros_like(array)
for ann in range(n_annuli):
inner_radius = radius_int + ann * annulus_width
if verbose:
print('{} : in_rad={}'.format(ann+1, inner_radius))
theta_init = 90
indices = get_annulus_segments(array[0], inner_radius, annulus_width,n_segments,theta_init)
# Computation of the speckle field for the different frames and estimation of the cube of residuals
for j in range(n_segments):
cube_out[:,indices[j][0], indices[j][1]] =NMF_patch(array[:, indices[j][0], indices[j][1]], ncomp, max_iter,random_state,verbose)
# Cube is derotated according to the parallactic angle and collapsed
cube_der = cube_derotate(cube_out, angle_list, imlib=imlib,interpolation=interpolation)
frame = cube_collapse(cube_der, mode=collapse)
if full_output:
return cube_out, cube_der, frame
else:
return frame
def NMF_patch(matrix, ncomp, max_iter,random_state,sklearn=False):
"""
Function allowing the computation via NMF of the speckle field for a
given sub-region of the original ADI sequence. The code is a partial reproduction of
the VIP function NMF_patch (Gonzalez et al. AJ, 154:7,2017)
"""
refs = matrix+ np.abs(matrix.min())
if sklearn==True:
mod = NMF_sklearn(n_components=ncomp, alpha=0, solver='cd', init='nndsvd',
max_iter=max_iter, random_state=random_state)
# H [ncomp, n_pixels]: Non-negative components of the data
H = mod.fit(refs).components_
W = mod.transform(refs)
reconstructed = np.dot(W, H)
else:
mod = NMF(X=refs, n_components=ncomp)
mod.SolveNMF(maxiters=max_iter, tol=0.001)
H=mod.H
W=mod.W
reconstructed = np.dot(W, H)
residuals = refs - reconstructed
return residuals
def NMF_patch_range(matrix, ncomp_range, max_iter,random_state,verbose):
"""
Function allowing the computation via NMF of the speckle field for a range of principal
components ncomp_range and a given sub-region of the original ADI sequence. The code is a
partial reproduction of the VIP function NMF_patch (Gonzalez et al. AJ, 154:7,2017)
"""
refs = matrix+ np.abs(matrix.min())
mod = NMF(X=refs, n_components=ncomp_range[len(ncomp_range)-1])
mod.SolveNMF(maxiters=max_iter, tol=0.001)
if verbose:
print('Done NMF with sklearn.NMF.')
residuals=[]
for i in ncomp_range:
H=mod.H[ncomp_range[0]:i,:]
W=mod.W[:,ncomp_range[0]:i]
reconstructed = np.dot(W, H)
residuals.append(refs - reconstructed)
return residuals
def annular_pca_adisdi(cube, angle_list,scale_list=None, radius_int=0, fwhm=4, asize=2, n_segments=1,
delta_rot=1,delta_sep=0.1, ncomp=1, svd_mode='lapack', nproc=None,
min_frames_lib=2, max_frames_lib=200, tol=1e-1, scaling=None,
imlib='opencv', interpolation='lanczos4', collapse='median',
full_output=False, verbose=False, cube_ref=None, weights=None):
""" PCA exploiting angular and spectral variability (ADI or ADI+SDI fashion).
"""
array,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
n, y, _ = array.shape
angle_list_t = check_pa_vector(angle_list_t)
n_annuli = int((y / 2 - radius_int) / asize)
if isinstance(delta_rot, tuple):
delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli)
elif isinstance(delta_rot, (int, float)):
delta_rot = [delta_rot] * n_annuli
if isinstance(n_segments, int):
n_segments = [n_segments for _ in range(n_annuli)]
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2) # for first annulus
n_segments.append(3) # for second annulus
ld = 2 * np.tan(360 / 4 / 2) * asize
for i in range(2, n_annuli): # rest of annuli
radius = i * asize
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360 / ang)))
if verbose:
msg = 'N annuli = {}, FWHM = {:.3f}'
print(msg.format(n_annuli, fwhm))
print('PCA per annulus (or annular sectors):')
if nproc is None: # Hyper-threading "duplicates" the cores -> cpu_count/2
nproc = cpu_count() // 2
# The annuli are built, and the corresponding PA thresholds for frame
# rejection are calculated (at the center of the annulus)
cube_out = np.zeros_like(array)
for ann in range(n_annuli):
if isinstance(ncomp, tuple) or isinstance(ncomp, np.ndarray):
if len(ncomp) == n_annuli:
ncompann = ncomp[ann]
else:
raise TypeError('If `ncomp` is a tuple, it must match the '
'number of annuli')
else:
ncompann = ncomp
n_segments_ann = n_segments[ann]
res_ann_par = _define_annuli(angle_list_t, ann, n_annuli, fwhm,
radius_int, asize, delta_rot[ann],
n_segments_ann, verbose)
pa_thr, inner_radius, ann_center = res_ann_par
indices = get_annulus_segments(array[0], inner_radius, asize,
n_segments_ann)
# Library matrix is created for each segment and scaled if needed
for j in range(n_segments_ann):
yy = indices[j][0]
xx = indices[j][1]
matrix_segm = array[:, yy, xx] # shape [nframes x npx_segment]
if cube_ref is not None:
matrix_segm_ref = cube_ref[:, yy, xx]
else:
matrix_segm_ref = None
res = pool_map(nproc, do_pca_patch, matrix_segm, iterable(range(n)),
angle_list_t,scale_list_t, fwhm, pa_thr,delta_sep, ann_center, svd_mode,
ncompann, min_frames_lib, max_frames_lib, tol,
matrix_segm_ref)
res = np.array(res)
residuals = np.array(res[:, 0])
for fr in range(n):
cube_out[fr][yy, xx] = residuals[fr]
# Cube is derotated according to the parallactic angle and collapsed
cube_der=rot_scale('fin',cube,cube_out,angle_list_t,scale_list_t, imlib, interpolation)
frame = cube_collapse(cube_der, mode=collapse)
return cube_der, frame
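# Illustrative call (argument values are placeholders): given an ADI cube and
# its parallactic angles, the function returns the derotated residual cube and
# the collapsed final frame:
#
#     cube_der, frame = annular_pca_adisdi(cube, angle_list, scale_list=None,
#                                          fwhm=4, asize=2, ncomp=5)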
def do_pca_patch(matrix, frame, angle_list,scale_list, fwhm, pa_threshold, delta_sep, ann_center,
svd_mode, ncomp, min_frames_lib, max_frames_lib, tol,
matrix_ref):
"""
Function doing the SVD/PCA for each frame patch. The code is a partial reproduction of
the VIP function do_pca_patch (Gonzalez et al. AJ, 154:7,2017)
"""
if scale_list is not None:
indices_left = np.intersect1d(_find_indices_adi(angle_list, frame,
pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, frame,
fwhm, delta_sep))
else:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
data_ref = matrix[indices_left]
if matrix_ref is not None:
# Stacking the ref and the target ref (pa thresh) libraries
data_ref = np.vstack((matrix_ref, data_ref))
curr_frame = matrix[frame] # current frame
V = get_eigenvectors(ncomp, data_ref, svd_mode, noise_error=tol)
transformed = np.dot(curr_frame, V.T)
reconstructed = np.dot(transformed.T, V)
residuals = curr_frame - reconstructed
return residuals, V.shape[0], data_ref.shape[0]
def do_pca_patch_range(matrix, frame, angle_list,scale_list, fwhm, pa_threshold,delta_sep, ann_center,
svd_mode, ncomp_range, min_frames_lib, max_frames_lib, tol,
matrix_ref):
"""
Function doing the SVD/PCA for each frame patch for a range of principal
component ncomp_range. The code is a partial reproduction of
the VIP function do_pca_patch (Gonzalez et al. AJ, 154:7,2017)
"""
if scale_list is not None:
indices_left = np.intersect1d(_find_indices_adi(angle_list, frame,
pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, frame,
fwhm, delta_sep))
else:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
data_ref = matrix[indices_left]
if matrix_ref is not None:
# Stacking the ref and the target ref (pa thresh) libraries
data_ref = np.vstack((matrix_ref, data_ref))
curr_frame = matrix[frame] # current frame
V = get_eigenvectors(ncomp_range[len(ncomp_range)-1], data_ref, svd_mode, noise_error=tol)
residuals=[]
for i in ncomp_range:
V_trunc=V[ncomp_range[0]:i,:]
transformed = np.dot(curr_frame, V_trunc.T)
reconstructed = np.dot(transformed.T, V_trunc)
residuals.append(curr_frame - reconstructed)
return residuals, V.shape[0], data_ref.shape[0]
def loci_adisdi(cube, angle_list,scale_list=None, fwhm=4, metric='manhattan',
dist_threshold=50, delta_rot=0.5,delta_sep=0.1, radius_int=0, asize=4,
n_segments=1, nproc=1, solver='lstsq', tol=1e-3,
optim_scale_fact=1, imlib='opencv', interpolation='lanczos4',
collapse='median', nann=None,local=False, verbose=True, full_output=False):
""" Least-squares model PSF subtraction for ADI or ADI+SDI. This code is an adaptation of the VIP
xloci function to provide, if required, the residuals after speckle field subtraction
for a given annulus.
"""
cube_rot_scale,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
y = cube_rot_scale.shape[1]
if not asize < y // 2:
raise ValueError("asize is too large")
angle_list = check_pa_vector(angle_list)
if local==True:
n_annuli = 3
radius_int=nann-asize
else:
n_annuli= int((y / 2 - radius_int) / asize)
if verbose:
print("Building {} annuli:".format(n_annuli))
if isinstance(delta_rot, tuple):
delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli)
elif isinstance(delta_rot, (int, float)):
delta_rot = [delta_rot] * n_annuli
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
annulus_width = asize
if isinstance(n_segments, int):
n_segments = [n_segments]*n_annuli
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2) # for first annulus
n_segments.append(3) # for second annulus
ld = 2 * np.tan(360/4/2) * annulus_width
for i in range(2, n_annuli): # rest of annuli
radius = i * annulus_width
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360/ang)))
# annulus-wise least-squares combination and subtraction
cube_res = np.zeros_like(cube_rot_scale)
ayxyx = [] # contains per-segment data
for ann in range(n_annuli):
n_segments_ann = n_segments[ann]
inner_radius_ann = radius_int + ann*annulus_width
# angles
pa_threshold = _define_annuli(angle_list, ann, n_annuli, fwhm,
radius_int, asize, delta_rot[ann],
n_segments_ann, verbose)[0]
# indices
indices = get_annulus_segments(cube_rot_scale[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann)
ind_opt = get_annulus_segments(cube_rot_scale[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann,
optim_scale_fact=optim_scale_fact)
# store segment data for multiprocessing
ayxyx += [(inner_radius_ann+asize//2,pa_threshold, indices[nseg][0], indices[nseg][1],
ind_opt[nseg][0], ind_opt[nseg][1]) for nseg in
range(n_segments_ann)]
msg = 'Patch-wise least-square combination and subtraction:'
# reverse order of processing, as outer segments take longer
res_patch = pool_map(nproc, _leastsq_patch, iterable(ayxyx[::-1]),
angle_list_t,scale_list_t,fwhm,cube_rot_scale, None, metric, dist_threshold,delta_sep,
solver, tol, verbose=verbose, msg=msg,
progressbar_single=True)
for patch in res_patch:
matrix_res, yy, xx = patch
cube_res[:, yy, xx] = matrix_res
cube_der=rot_scale('fin',cube,cube_res,angle_list_t,scale_list_t, imlib, interpolation)
frame_der_median = cube_collapse(cube_der, collapse)
if verbose:
print('Done processing annuli')
return cube_der, frame_der_median
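# Illustrative call (placeholder values): with local=True and nann set to the
# radius of interest (in pixels), only the few annuli around nann are
# processed, which gives the per-annulus residuals mentioned in the docstring:
#
#     cube_der, frame = loci_adisdi(cube, angle_list, fwhm=4, asize=4,
#                                   nann=12, local=True)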
def _leastsq_patch(ayxyx, angle_list,scale_list,fwhm,cube, nann,metric, dist_threshold,delta_sep,
solver, tol,formod=False,psf=None):
"""
    Function allowing the estimation of the optimal factors for the modeled speckle field
    estimation via the LOCI framework. The code has been developed based on the VIP
    python function _leastsq_patch, but additionally returns the set of coefficients used for
the speckle field computation.
"""
ann_center,pa_threshold, yy, xx, yy_opti, xx_opti = ayxyx
ind_ref_list=[]
coef_list=[]
yy_opt=[]
xx_opt=[]
for j in range(0,len(yy_opti)):
if not any(x in np.where(yy==yy_opti[j])[0] for x in np.where(xx==xx_opti[j])[0]):
xx_opt.append(xx_opti[j])
yy_opt.append(yy_opti[j])
values = cube[:, yy, xx]
matrix_res = np.zeros((values.shape[0], yy.shape[0]))
values_opt = cube[:, yy_opti, xx_opti]
n_frames = cube.shape[0]
for i in range(n_frames):
if scale_list is not None:
ind_fr_i = np.intersect1d(_find_indices_adi(angle_list, i,
pa_threshold, truncate=False),_find_indices_sdi(scale_list, ann_center, i,
fwhm, delta_sep))
else:
ind_fr_i = _find_indices_adi(angle_list, i,
pa_threshold, truncate=False)
if len(ind_fr_i) > 0:
A = values_opt[ind_fr_i]
b = values_opt[i]
if solver == 'lstsq':
coef = np.linalg.lstsq(A.T, b, rcond=tol)[0] # SVD method
elif solver == 'nnls':
coef = sp.optimize.nnls(A.T, b)[0]
elif solver == 'lsq':
coef = sp.optimize.lsq_linear(A.T, b, bounds=(0, 1),
method='trf',
lsq_solver='lsmr')['x']
else:
raise ValueError("`solver` not recognized")
else:
msg = "No frames left in the reference set. Try increasing "
msg += "`dist_threshold` or decreasing `delta_rot`."
raise RuntimeError(msg)
if formod==True:
ind_ref_list.append(ind_fr_i)
coef_list.append(coef)
recon = np.dot(coef, values[ind_fr_i])
matrix_res[i] = values[i] - recon
if formod==True:
return matrix_res,ind_ref_list,coef_list, yy, xx,
else:
return matrix_res, yy,xx
def llsg_adisdi(cube, angle_list,scale_list, fwhm, rank=10, thresh=1, max_iter=10,
low_rank_ref=False, low_rank_mode='svd', auto_rank_mode='noise',
residuals_tol=1e-1, cevr=0.9, thresh_mode='soft', nproc=1,
asize=None, n_segments=4, azimuth_overlap=None, radius_int=None,
random_seed=None, imlib='opencv', interpolation='lanczos4',
high_pass=None, collapse='median', full_output=True, verbose=True,
debug=False):
""" Local low rank plus Gaussian PSF subtraction for ADI or ADI+SDI. This
code is an adaptation of the VIP llsg function.
"""
cube_rot_scale,angle_list_t,scale_list_t=rot_scale('ini',cube,None,angle_list,scale_list,imlib, interpolation)
list_l, list_s, list_g, f_l, frame_fin, f_g = vip.llsg.llsg(cube_rot_scale, angle_list_t, fwhm, rank=rank,asize=asize, thresh=1,n_segments=n_segments, max_iter=40, random_seed=10, nproc=nproc,full_output=True,verbose=False)
res_s=np.array(list_s)
residuals_cube_=cube_derotate(res_s[0],-angle_list_t)
cube_der=rot_scale('fin',cube,residuals_cube_,angle_list_t,scale_list_t, imlib, interpolation)
frame_fin=cube_collapse(cube_der, collapse)
return cube_der,frame_fin
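# Illustrative call (placeholder values); this wrapper only handles the
# rescaling/derotation around VIP's llsg implementation:
#
#     cube_der, frame = llsg_adisdi(cube, angle_list, None, fwhm=4,
#                                   rank=5, asize=4, n_segments=4)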
def _decompose_patch(indices, i_patch,cube_init, n_segments_ann, rank, low_rank_ref,
low_rank_mode, thresh, thresh_mode, max_iter,
auto_rank_mode, cevr, residuals_tol, random_seed,
debug=False, full_output=False):
""" Patch decomposition from the LLSG VIP function.
"""
j = i_patch
yy = indices[j][0]
xx = indices[j][1]
data_segm = cube_init[:, yy, xx]
if low_rank_ref:
ref_segments = list(range(n_segments_ann))
ref_segments.pop(j)
for m, n in enumerate(ref_segments):
if m == 0:
yy_ref = indices[n][0]
xx_ref = indices[n][1]
else:
yy_ref = np.hstack((yy_ref, indices[n][0]))
xx_ref = np.hstack((xx_ref, indices[n][1]))
data_ref = cube_init[:, yy_ref, xx_ref]
else:
data_ref = data_segm
patch = _patch_rlrps(data_segm, data_ref, rank, low_rank_ref,
low_rank_mode, thresh, thresh_mode,
max_iter, auto_rank_mode, cevr,
residuals_tol, random_seed, debug=debug,
full_output=full_output)
return patch
_largenumber = 1E100
_smallnumber = 1E-5
class NMF:
"""
Nonnegative Matrix Factorization - Build a set of nonnegative basis components given
a dataset with Heteroscedastic uncertainties and missing data with a vectorized update rule.
Algorithm:
-- Iterative multiplicative update rule
Input:
-- X: m x n matrix, the dataset
Optional Input/Output:
-- n_components: desired size of the basis set, default 5
-- V: m x n matrix, the weight, (usually) the inverse variance
-- M: m x n binary matrix, the mask, False means missing/undesired data
-- H: n_components x n matrix, the H matrix, usually interpreted as the coefficients
-- W: m x n_components matrix, the W matrix, usually interpreted as the basis set
Comments:
-- Between W and H, which one is the basis set and which one is the coefficient
depends on how you interpret the data, because you can simply transpose everything
as in X-WH versus X^T - (H^T)(W^T)
-- Everything needs to be non-negative
References:
-- Guangtun Ben Zhu, 2016
A Vectorized Algorithm for Nonnegative Matrix Factorization with
Heteroskedastic Uncertainties and Missing Data
AJ/PASP, (to be submitted)
-- Blanton, M. and Roweis, S. 2007
K-corrections and Filter Transformations in the Ultraviolet, Optical, and Near-infrared
The Astronomical Journal, 133, 734
-- Lee, D. D., & Seung, H. S., 2001
Algorithms for non-negative matrix factorization
Advances in neural information processing systems, pp. 556-562
"""
def __init__(self, X, W=None, H=None, V=None, M=None, n_components=5):
"""
Initialization
Required Input:
X -- the input data set
Optional Input/Output:
-- n_components: desired size of the basis set, default 5
-- V: m x n matrix, the weight, (usually) the inverse variance
-- M: m x n binary matrix, the mask, False means missing/undesired data
-- H: n_components x n matrix, the H matrix, usually interpreted as the coefficients
-- W: m x n_components matrix, the W matrix, usually interpreted as the basis set
"""
# I'm making a copy for the safety of everything; should not be a bottleneck
self.X = np.copy(X)
if (np.count_nonzero(self.X<0)>0):
print("There are negative values in X. Setting them to be zero...", flush=True)
self.X[self.X<0] = 0.
self.n_components = n_components
self.maxiters = 100
self.tol = _smallnumber
np.random.seed(10)
if (W is None):
self.W = np.random.rand(self.X.shape[0], self.n_components)
else:
if (W.shape != (self.X.shape[0], self.n_components)):
raise ValueError("Initial W has wrong shape.")
self.W = np.copy(W)
if (np.count_nonzero(self.W<0)>0):
print("There are negative values in W. Setting them to be zero...", flush=True)
self.W[self.W<0] = 0.
if (H is None):
self.H = np.random.rand(self.n_components, self.X.shape[1])
else:
if (H.shape != (self.n_components, self.X.shape[1])):
raise ValueError("Initial H has wrong shape.")
self.H = np.copy(H)
if (np.count_nonzero(self.H<0)>0):
print("There are negative values in H. Setting them to be zero...", flush=True)
self.H[self.H<0] = 0.
if (V is None):
self.V = np.ones(self.X.shape)
else:
if (V.shape != self.X.shape):
raise ValueError("Initial V(Weight) has wrong shape.")
self.V = np.copy(V)
if (np.count_nonzero(self.V<0)>0):
print("There are negative values in V. Setting them to be zero...", flush=True)
self.V[self.V<0] = 0.
if (M is None):
            self.M = np.ones(self.X.shape, dtype=bool)
else:
if (M.shape != self.X.shape):
raise ValueError("M(ask) has wrong shape.")
            if (M.dtype != bool):
raise TypeError("M(ask) needs to be boolean.")
self.M = np.copy(M)
# Set masked elements to be zero
self.V[(self.V*self.M)<=0] = 0
self.V_size = np.count_nonzero(self.V)
@property
def cost(self):
"""
        Total cost (reduced chi-squared) of the current factorization W * H
"""
diff = self.X - np.dot(self.W, self.H)
chi2 = np.einsum('ij,ij', self.V*diff, diff)/self.V_size
return chi2
def SolveNMF(self, W_only=False, H_only=False, maxiters=None, tol=None):
"""
Construct the NMF basis
Keywords:
-- W_only: Only update W, assuming H is known
-- H_only: Only update H, assuming W is known
-- Only one of them can be set
Optional Input:
-- tol: convergence criterion, default 1E-5
-- maxiters: allowed maximum number of iterations, default 1000
Output:
-- chi2: reduced final cost
-- time_used: time used in this run
"""
if (maxiters is not None):
self.maxiters = maxiters
if (tol is not None):
self.tol = tol
chi2 = self.cost
oldchi2 = _largenumber
if (W_only and H_only):
return (chi2, 0.)
V = np.copy(self.V)
VT = V.T
#XV = self.X*self.V
XV = np.multiply(V, self.X)
XVT = np.multiply(VT, self.X.T)
niter = 0
while (niter < self.maxiters) and ((oldchi2-chi2)/oldchi2 > self.tol):
# Update H
if (not W_only):
H_up = np.dot(XVT, self.W)
WHVT = np.multiply(VT, np.dot(self.W, self.H).T)
H_down = np.dot(WHVT, self.W)
self.H = self.H*H_up.T/H_down.T
# Update W
if (not H_only):
W_up = np.dot(XV, self.H.T)
WHV = np.multiply(V, np.dot(self.W, self.H))
W_down = np.dot(WHV, self.H.T)
self.W = self.W*W_up/W_down
# chi2
oldchi2 = chi2
chi2 = self.cost
return
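# Minimal usage sketch for the NMF class above (values are illustrative only):
#
#     data = np.random.rand(100, 500)            # any non-negative matrix
#     model = NMF(X=data, n_components=5)
#     model.SolveNMF(maxiters=200, tol=1e-4)
#     approximation = np.dot(model.W, model.H)   # low-rank reconstruction
#
# W holds the basis set and H the coefficients (or vice versa, depending on
# how X is interpreted, as noted in the class docstring).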
| 38.590982 | 242 | 0.614542 | 6,273 | 0.130878 | 0 | 0 | 221 | 0.004611 | 0 | 0 | 12,292 | 0.256457 |
733a51b0598b93f7ddad878e61c9f58e36f463d6 | 4,618 | py | Python | src/custom_dataset.py | devJWSong/transformer-multiturn-dialogue-pytorch | 4ddedaef45f31d75e88bdb909a4451173faec4c8 | [
"MIT"
]
| 11 | 2021-03-22T10:22:42.000Z | 2021-09-15T23:50:46.000Z | src/custom_dataset.py | devjwsong/transformer-chatbot-pytorch | 4ddedaef45f31d75e88bdb909a4451173faec4c8 | [
"MIT"
]
| 1 | 2021-12-10T04:52:39.000Z | 2021-12-10T04:52:40.000Z | src/custom_dataset.py | devjwsong/transformer-chatbot-pytorch | 4ddedaef45f31d75e88bdb909a4451173faec4c8 | [
"MIT"
]
| null | null | null | from torch.utils.data import Dataset
from tqdm import tqdm
import torch
import pickle
import json
class CustomDataset(Dataset):
def __init__(self, args, tokenizer, data_type):
assert data_type in ["train", "valid", "test"]
print(f"Loading {data_type} data...")
with open(f"{args.task_dir}/{data_type}.pickle", "rb") as f:
dials = pickle.load(f)
with open(f"{args.task_dir}/data_info.json", "r") as f:
data_info = json.load(f)
self.src_idxs = [] # (N, T, S_L)
self.num_valid_turns = [] # (N)
self.trg_idxs = [] # (N, T_L)
max_pers = data_info["max_num_pers"]
num_contexts = max_pers + args.max_turns
for dial in tqdm(dials):
hists = []
persona1, persona2, turns = dial['persona1'], dial['persona2'], dial['turns']
            pers = [] # The system's persona will be handled as extra histories without a speaker token. (or maybe empty...)
for per in persona2:
token_idxs = [args.bos_id] + tokenizer.encode(per) + [args.eos_id]
pers.append(token_idxs)
for t, turn in enumerate(turns):
if t % 2 == 0: # Speaker 1: User
token_idxs = [args.bos_id, args.sp1_id] + tokenizer.encode(turn) + [args.eos_id]
                else: # Speaker 2: System
token_idxs = [args.bos_id, args.sp2_id] + tokenizer.encode(turn) + [args.eos_id]
hists.append(token_idxs)
hists = [self.trunc(token_idxs, args.src_max_len, args.eos_id) for token_idxs in hists]
if len(pers) > 0:
pers = [self.trunc(token_idxs, args.src_max_len, args.eos_id) for token_idxs in pers]
for i in range(len(hists)):
if i % 2 == 1:
self.trg_idxs.append(hists[i])
start, end = i-args.max_turns, i
if start < 0:
start = 0
context = hists[start:end]
assert len(context) > 0
if len(pers) > 0:
context = pers + context
self.num_valid_turns.append(len(context))
if len(context) < num_contexts:
num_extras = num_contexts - len(context)
context += [[args.bos_id, args.eos_id]] * num_extras
assert len(context) == num_contexts
self.src_idxs.append(context)
# Padding
for c, context in enumerate(self.src_idxs):
for i, utter in enumerate(self.src_idxs[c]):
token_idxs = self.src_idxs[c][i]
self.src_idxs[c][i] = self.padding(token_idxs, args.src_max_len, args.pad_id)
assert len(self.src_idxs) == len(self.trg_idxs)
assert len(self.src_idxs) == len(self.num_valid_turns)
def __len__(self):
return len(self.src_idxs)
def __getitem__(self, idx):
return self.src_idxs[idx], self.num_valid_turns[idx], self.trg_idxs[idx]
def padding(self, token_idxs, max_len, pad_id):
num_extras = max_len - len(token_idxs)
token_idxs += [pad_id] * num_extras
return token_idxs
def trunc(self, token_idxs, max_len, eos_id):
token_idxs = token_idxs[:max_len]
token_idxs[-1] = eos_id
return token_idxs
class PadCollate():
def __init__(self, pad_id):
self.pad_id = pad_id
def pad_collate(self, batch):
src_idxs, num_valid_turns, trg_idxs = [], [], []
for seqs in batch:
src_idxs.append(seqs[0])
num_valid_turns.append(seqs[1])
trg_idxs.append(torch.LongTensor(seqs[2]))
trg_idxs = torch.nn.utils.rnn.pad_sequence(trg_idxs, batch_first=True, padding_value=self.pad_id) # (B, T_L)
try:
return torch.LongTensor(src_idxs).contiguous(), torch.LongTensor(num_valid_turns).contiguous(), trg_idxs.contiguous()
except:
print(f"batch size: {len(src_idxs)}")
for b in range(len(src_idxs)):
print(f"num turns: {len(src_idxs[b])}")
print(f"batch size: {len(num_valid_turns)}")
print(num_valid_turns)
print(trg_idxs.shape)
exit() | 38.483333 | 129 | 0.529017 | 4,506 | 0.975747 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.098311 |
733ab2bdfefcfa168386562dc21d727ee4511840 | 1,588 | py | Python | research/codec/codec_example.py | FXTD-ODYSSEY/QBinder | b4b288e7c0ef09d2382e3d6678a5c41950257b76 | [
"MIT"
]
| 13 | 2020-11-29T15:02:57.000Z | 2022-02-11T03:12:25.000Z | research/codec/codec_example.py | FXTD-ODYSSEY/QBinder | b4b288e7c0ef09d2382e3d6678a5c41950257b76 | [
"MIT"
]
| 8 | 2020-11-30T02:47:56.000Z | 2021-05-19T03:44:16.000Z | research/codec/codec_example.py | FXTD-ODYSSEY/QtConfig | 978cddf26c0305677b65b04d206138970cb73762 | [
"MIT"
]
| 2 | 2020-11-30T01:59:19.000Z | 2021-12-17T06:44:54.000Z | # -*- coding: future_fstrings -*-
import codecs
import pdb
import string
# NOTE https://stackoverflow.com/questions/38777818/how-do-i-properly-create-custom-text-codecs
# prepare map from numbers to letters
_encode_table = {str(number): bytes(letter) for number, letter in enumerate(string.ascii_lowercase)}
# prepare inverse map
_decode_table = {v: k for k, v in _encode_table.items()}
def custom_encode(text):
# example encoder that converts ints to letters
print "custom_encode",text
# see https://docs.python.org/3/library/codecs.html#codecs.Codec.encode
return b''.join(_encode_table[x] for x in text), len(text)
def custom_decode(binary):
# example decoder that converts letters to ints
print "custom_decode",binary
# see https://docs.python.org/3/library/codecs.html#codecs.Codec.decode
return ''.join(_decode_table[x] for x in binary), len(binary)
def custom_search_function(encoding_name):
return codecs.CodecInfo(encode=custom_encode, decode=custom_decode, name='Reasons')
def main():
# register your custom codec
# note that CodecInfo.name is used later
codecs.register(custom_search_function)
binary = 'abcdefg'
# decode letters to numbers
pdb.set_trace()
text = binary.decode('Reasons')
print(text)
# encode numbers to letters
binary2 = text.encode('Reasons')
print(binary2)
# fstring = 'f"hello {text}"'.decode('future-fstrings')
# print fstring
# encode(decode(...)) should be an identity function
assert binary == binary2
if __name__ == '__main__':
main() | 28.872727 | 100 | 0.714736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 747 | 0.470403 |
733b44cae4895b7b97c2632f68beb2990e9371cb | 370 | py | Python | benchmark_python_lkml.py | Ladvien/rust_lookml_parser | a99a9663f2e0ccd0e7eff0fb6ec4f4496032265c | [
"MIT"
]
| null | null | null | benchmark_python_lkml.py | Ladvien/rust_lookml_parser | a99a9663f2e0ccd0e7eff0fb6ec4f4496032265c | [
"MIT"
]
| null | null | null | benchmark_python_lkml.py | Ladvien/rust_lookml_parser | a99a9663f2e0ccd0e7eff0fb6ec4f4496032265c | [
"MIT"
]
| null | null | null | import lkml
from time import time_ns
from rich import print
FILE_PATH = "/Users/ladvien/rusty_looker/src/resources/test.lkml"
with open(FILE_PATH, "r") as f:
lookml = f.read()
startTime = time_ns() // 1_000_000
result = lkml.load(lookml)
print(result)
executionTime = (time_ns() // 1_000_000) - startTime
print('Execution time in milliseconds: ' + str(executionTime)) | 26.428571 | 65 | 0.735135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.22973 |
733c41f200ce9ccff635234faca97343a23e5190 | 1,595 | py | Python | Linear_Regression.py | svdeepak99/TSA-Twitter_Sentiment_Analysis | 41c13682ccc110025c5fbd396c0982d54febc6cc | [
"MIT"
]
| null | null | null | Linear_Regression.py | svdeepak99/TSA-Twitter_Sentiment_Analysis | 41c13682ccc110025c5fbd396c0982d54febc6cc | [
"MIT"
]
| null | null | null | Linear_Regression.py | svdeepak99/TSA-Twitter_Sentiment_Analysis | 41c13682ccc110025c5fbd396c0982d54febc6cc | [
"MIT"
]
| null | null | null | from keras.models import Sequential, load_model
from keras.layers import Dense
import csv
import numpy as np
import os
LOAD_MODEL = False
with open("Linear_Regression/Normalized_Attributes.csv", "r", newline='') as fp:
reader = csv.reader(fp)
headings = next(reader)
    dataset = np.array(list(reader), dtype=float)
with open("Linear_Regression/VADER_Sentiment.csv", "r", newline='') as fp:
reader = csv.reader(fp)
outputs = np.array([x[0] for x in list(reader)])
if os.path.isfile("Linear_Regression/model/regression_full.h5") and LOAD_MODEL:
model = load_model("Linear_Regression/model/regression_full.h5")
else:
model = Sequential()
model.add(Dense(1, input_dim = 33, activation='linear'))
model.compile(loss='mse', optimizer='rmsprop', metrics=['mse'])
model.fit(x=dataset, y=outputs, epochs=40, verbose=1)
model.save("Linear_Regression/model/regression_full.h5")
model.summary()
weights = model.get_weights()
weights_list = []
for i, w in enumerate(weights[0]):
print(f'{i+1}) {headings[i]} : {w[0]}')
weights_list.append([headings[i], w[0]])
print(f'34) BIAS: {weights[1][0]}\n')
weights_list.append(['BIAS', weights[1][0]])
with open("Linear_Regression/Full_weights.csv", "w", newline='') as fp:
writer = csv.writer(fp)
writer.writerows(weights_list)
print(len(weights), len(weights[0]), len(weights[1]))
print(model.predict(dataset[:10]))
print(outputs[:10])
print(np.sum(dataset[0]*np.array([x[0] for x in weights[0]]))+weights[1][0], model.predict(np.array([dataset[0]])))
| 32.55102 | 116 | 0.680251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.226959 |
733c8d8b8ea4cf5eaafe8785802f0c3c067c38ff | 3,141 | py | Python | UserCode/bressler/multibubblescintillationcheck.py | cericdahl/SBCcode | 90a7841a5c1208d64f71a332289d9005a011aa21 | [
"MIT"
]
| 4 | 2018-08-27T18:02:34.000Z | 2020-06-09T21:19:04.000Z | UserCode/bressler/multibubblescintillationcheck.py | SBC-Collaboration/SBC-Analysis | 90a7841a5c1208d64f71a332289d9005a011aa21 | [
"MIT"
]
| null | null | null | UserCode/bressler/multibubblescintillationcheck.py | SBC-Collaboration/SBC-Analysis | 90a7841a5c1208d64f71a332289d9005a011aa21 | [
"MIT"
]
| 4 | 2019-06-20T21:36:26.000Z | 2020-11-10T17:23:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 19:33:02 2021
@author: bressler
"""
import SBCcode as sbc
import numpy as np
import pulse_integrator as pi
import gc
def check_multibub_scintillation(run, event, at0, PMTgain, PMTwindow):
tstart = PMTwindow[0]
t_end= PMTwindow[1]
scintillation_signal = 0
datadir = '/bluearc/storage/SBC-17-data'
e = sbc.DataHandling.GetSBCEvent.GetEvent(datadir+'/'+run,event)
cgate = e["fastDAQ"]["CAMgate"]
fdt = e["fastDAQ"]["time"]
LED_on = [fdt[i] for i in range(len(cgate)) if cgate[i]<-0.5]
look_times = [x for x in LED_on if (x < 0 and abs(x-at0)<tstart)]
#print(str(len(LED_on)/len(fdt)))
if len(look_times)>0:
LED_during_window = True
else:
LED_during_window = False
dcam = np.diff(cgate)
fdt = e["fastDAQ"]["time"]
camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]
pmttracetime = e["PMTtraces"]["t0_sec"][:,0]+e["PMTtraces"]["t0_frac"][:,0]
d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)
pmtalign = d["PMT_trigt0_sec"]+d["PMT_trigt0_frac"]
tracetimes = pmttracetime - pmtalign
i=0 # to match the indexing of the pre-made code I had 1???
candidate = 0
for t in (tracetimes-at0):
# loop through every PMT trace for the event
if t<t_end and t>tstart:
# if the trace time is within 500 microsec before acoustic t0
"""
lastCamOff = 0
for k in range(len(camOffTimes)):
if t+at0 > camOffTimes[k]:
lastCamOff = camOffTimes[k]
elif t+at0 < camOffTimes[k]:
break
if t+at0-lastCamOff > 25e-6:
# if the trace time is more than 25 microseconds away from a camera gate rise
"""
#take abs to get positive area:
trace = np.fabs(e["PMTtraces"]["traces"][i][0])
#if ch0 saturated, stitch in low res channel:
if max(trace) == 128:
trace = pi.stitchTraces(trace,np.fabs(e["PMTtraces"]["traces"][i][1]))
dt = e["PMTtraces"]["dt"][i][0]
#integrate and convert to phe:
[phe,n,totInt,pktimes] = pi.SBC_pulse_integrator_bressler(trace,dt)
            if phe is not None:
phe /= PMTgain
#keep track of largest candidate:
if phe > candidate:
candidate = phe
i+=1
#i.e. if there is a candidate PMT trace with area greater than zero
if candidate > 0:
scintillation_signal = candidate
gc.collect()
return [LED_during_window, scintillation_signal]
def main():
    returned = check_multibub_scintillation('20170703_5', 5, -0.1, 1e7, (-500e-6, 0))  # placeholder gain/window values; run name only, the data directory is prepended inside the function
if __name__ == "__main__":
main() | 36.523256 | 97 | 0.539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,196 | 0.38077 |
733cfbac832497fb734f7d0cde01792ad3325cd5 | 4,038 | py | Python | app/core/models.py | echosisdev/openmrs-disa-sync | 077331c5b96394f15cc90aba7ce4018e10d5608d | [
"MIT"
]
| null | null | null | app/core/models.py | echosisdev/openmrs-disa-sync | 077331c5b96394f15cc90aba7ce4018e10d5608d | [
"MIT"
]
| null | null | null | app/core/models.py | echosisdev/openmrs-disa-sync | 077331c5b96394f15cc90aba7ce4018e10d5608d | [
"MIT"
]
| null | null | null | from django.db import models
from django.db.models.signals import pre_save, post_save
from core.utils.constants import Constants
from core.utils.data_convertion import DataConversion
class ExcelFile(models.Model):
file_name = models.FileField(upload_to='uploads')
date_created = models.DateTimeField(auto_now_add=True)
activated = models.BooleanField(default=False)
def __str__(self):
return f'File Id{self.id} File name {self.file_name}'
class CsvFile(models.Model):
file_name = models.FileField(upload_to='uploads')
date_uploaded = models.DateTimeField(auto_now_add=True)
activated = models.BooleanField(default=False)
def __str__(self):
return f'File Id{self.id} File name {self.file_name}'
class ViralLoad(models.Model):
laboratory_id = models.CharField(max_length=100, null=True, blank=True)
sector = models.CharField(max_length=30, blank=True, null=True)
number_orig_lab = models.CharField(max_length=100, blank=True, null=True)
province = models.CharField(max_length=100, blank=True, null=True)
district = models.CharField(max_length=100, blank=True, null=True)
health_facility = models.CharField(max_length=100, blank=True, null=True)
patient_name = models.CharField(max_length=100, blank=True, null=True)
gender = models.CharField(max_length=100, blank=True, null=True)
reference = models.CharField(max_length=100, blank=True, null=True)
capture_date = models.DateField(null=True, blank=True)
access_date = models.DateField(null=True, blank=True)
nid = models.CharField(max_length=100, blank=True, null=True)
viral_load = models.CharField(max_length=100, null=True, blank=True)
viral_load_qualitative = models.CharField(
max_length=100, blank=True, null=True)
synced = models.BooleanField(default=False)
formatted_nid = models.CharField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = 'Viral Load'
verbose_name_plural = 'Viral Loads'
def __str__(self):
return self.patient_name
class Patient(models.Model):
patient_uuid = models.CharField(max_length=500)
#person_id = models.IntegerField()
nid = models.CharField(max_length=100, blank=True, null=True)
patient_name = models.CharField(max_length=100, blank=True, null=True)
def __str__(self):
return self.patient_name
class Encounter(models.Model):
encounterDatetime = models.DateTimeField(auto_now_add=True)
patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
encounterType_uuid = models.CharField(
max_length=255, default=Constants().get_uuids().get('encounter_type'))
location_uuid = models.CharField(
max_length=255, default=Constants().get_uuids().get('hpt'))
form_uuid = models.CharField(
max_length=255, default=Constants().get_uuids().get('form'))
synced = models.BooleanField(default=False)
def __str__(self):
        return self.patient.patient_name
class Observation(models.Model):
patient = models.ForeignKey(
Patient, on_delete=models.CASCADE)
obsDateTime = models.DateTimeField(auto_now_add=True)
concept = models.CharField(max_length=255)
value_numeric = models.PositiveIntegerField(null=True, blank=True)
value_coded = models.PositiveIntegerField(null=True, blank=True)
value_datetime = models.DateTimeField(null=True, blank=True)
encounter = models.ForeignKey(Encounter, on_delete=models.CASCADE)
location = models.CharField(
max_length=255, default=Constants().get_uuids().get('hpt'))
value = models.CharField(max_length=255)
voided = models.BooleanField(default=False)
synced = models.BooleanField(default=False)
def __str__(self):
        return str(self.id)
# def insert_formatted_nid(sender, instance, created, *args, **kwargs):
# if created:
# instance.formatted_nid = DataConversion.format_nid(instance.nid)
# print(instance.formatted_nid)
# post_save.connect(insert_formatted_nid, sender=ViralLoad)
| 38.826923 | 78 | 0.733779 | 3,567 | 0.883358 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.114165 |
733e97b6658e7e2eb8c13752d62cc0a274acaa1f | 1,533 | py | Python | ceibacli/job_schedulers/slurm.py | cffbots/ceiba-cli | 7e77199c1fe919f024c4707b65578fec320713a6 | [
"Apache-2.0"
]
| 2 | 2020-11-10T08:52:15.000Z | 2020-11-10T08:52:17.000Z | ceibacli/job_schedulers/slurm.py | cffbots/ceiba-cli | 7e77199c1fe919f024c4707b65578fec320713a6 | [
"Apache-2.0"
]
| 24 | 2020-09-22T09:58:38.000Z | 2021-01-14T11:02:33.000Z | ceibacli/job_schedulers/slurm.py | cffbots/ceiba-cli | 7e77199c1fe919f024c4707b65578fec320713a6 | [
"Apache-2.0"
]
| 1 | 2022-02-03T13:46:07.000Z | 2022-02-03T13:46:07.000Z | """Interface to the `SLURM job scheduler <https://slurm.schedmd.com/documentation.html>`_
.. autofunction:: create_slurm_script
"""
from pathlib import Path
from typing import Any, Dict, List
from ..utils import Options
def create_slurm_script(opts: Options, jobs: List[Dict[str, Any]], jobs_metadata: List[Options]) -> str:
"""Create a script to run the workflow using the SLURM job schedule."""
slurm_file = Path("launch.sh")
# Get SLURM configuration
scheduler = opts.scheduler
# Use the configuration provided by the user
if scheduler.free_format is not None:
script = scheduler.free_format
else:
script = make_script(opts.scheduler)
# Append command to run the workflow
for meta, job in zip(jobs_metadata, jobs):
input_file = meta.input.absolute().as_posix()
workdir = opts.workdir.absolute().as_posix()
script += f'\ncd {workdir} && {opts.command} {input_file}'
with open(slurm_file, 'w') as handler:
handler.write(script)
return f"sbatch {slurm_file.absolute().as_posix()}"
def make_script(scheduler: Options) -> str:
"""Create a SLURM script using the ``scheduler`` options."""
arguments = {"cpus-per-task", "partition"}
script = f"""#!/bin/bash
#SBATCH -N {scheduler.nodes}
#SBATCH -t {scheduler.wall_time}
"""
# Add optional arguments
for arg in arguments:
value = scheduler.get(arg, None)
if value is not None:
script += f"#SBATCH --{arg} {value}\n"
return script
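# For illustration, with nodes=2, wall_time="01:00:00" and cpus-per-task=4 the
# generated header would look like (placeholder values):
#
#   #!/bin/bash
#   #SBATCH -N 2
#   #SBATCH -t 01:00:00
#   #SBATCH --cpus-per-task 4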
| 29.480769 | 104 | 0.666014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.413568 |
733ee42203016605540515b9f13fedcc898ddec0 | 5,290 | py | Python | thumbor/url.py | wking/thumbor | 97a55594a67e3cf3b5e7d09cde5944bc821eeb1e | [
"MIT"
]
| null | null | null | thumbor/url.py | wking/thumbor | 97a55594a67e3cf3b5e7d09cde5944bc821eeb1e | [
"MIT"
]
| null | null | null | thumbor/url.py | wking/thumbor | 97a55594a67e3cf3b5e7d09cde5944bc821eeb1e | [
"MIT"
]
| null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import re
from urllib import quote
class Url(object):
unsafe_or_hash = r'(?:(?:(?P<unsafe>unsafe)|(?P<hash>.+?))/)?'
debug = '(?:(?P<debug>debug)/)?'
meta = '(?:(?P<meta>meta)/)?'
trim = '(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\d+)?)/)?'
crop = '(?:(?P<crop_left>\d+)x(?P<crop_top>\d+):(?P<crop_right>\d+)x(?P<crop_bottom>\d+)/)?'
fit_in = '(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?'
dimensions = '(?:(?P<horizontal_flip>-)?(?P<width>(?:\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\d+|orig))?/)?'
halign = r'(?:(?P<halign>left|right|center)/)?'
valign = r'(?:(?P<valign>top|bottom|middle)/)?'
smart = r'(?:(?P<smart>smart)/)?'
filters = r'(?:filters:(?P<filters>.+?\))/)?'
image = r'(?P<image>.+)'
compiled_regex = None
@classmethod
def regex(cls, has_unsafe_or_hash=True):
reg = ['/?']
if has_unsafe_or_hash:
reg.append(cls.unsafe_or_hash)
reg.append(cls.debug)
reg.append(cls.meta)
reg.append(cls.trim)
reg.append(cls.crop)
reg.append(cls.fit_in)
reg.append(cls.dimensions)
reg.append(cls.halign)
reg.append(cls.valign)
reg.append(cls.smart)
reg.append(cls.filters)
reg.append(cls.image)
return ''.join(reg)
@classmethod
def parse_decrypted(cls, url):
if cls.compiled_regex:
reg = cls.compiled_regex
else:
reg = cls.compiled_regex = re.compile(cls.regex(has_unsafe_or_hash=False))
result = reg.match(url)
if not result:
return None
result = result.groupdict()
int_or_0 = lambda value: 0 if value is None else int(value)
values = {
'debug': result['debug'] == 'debug',
'meta': result['meta'] == 'meta',
'trim': result['trim'],
'crop': {
'left': int_or_0(result['crop_left']),
'top': int_or_0(result['crop_top']),
'right': int_or_0(result['crop_right']),
'bottom': int_or_0(result['crop_bottom'])
},
'adaptive': result['adaptive'] == 'adaptive',
'full': result['full'] == 'full',
'fit_in': result['fit_in'] == 'fit-in',
'width': result['width'] == 'orig' and 'orig' or int_or_0(result['width']),
'height': result['height'] == 'orig' and 'orig' or int_or_0(result['height']),
'horizontal_flip': result['horizontal_flip'] == '-',
'vertical_flip': result['vertical_flip'] == '-',
'halign': result['halign'] or 'center',
'valign': result['valign'] or 'middle',
'smart': result['smart'] == 'smart',
'filters': result['filters'] or '',
'image': 'image' in result and result['image'] or None
}
return values
@classmethod # NOQA
def generate_options(cls,
debug=False,
width=0,
height=0,
smart=False,
meta=False,
trim=None,
adaptive=False,
full=False,
fit_in=False,
horizontal_flip=False,
vertical_flip=False,
halign='center',
valign='middle',
crop_left=None,
crop_top=None,
crop_right=None,
crop_bottom=None,
filters=None):
url = []
if debug:
url.append('debug')
if meta:
url.append('meta')
if trim:
if isinstance(trim, bool):
url.append('trim')
else:
url.append('trim:%s' % trim)
crop = crop_left or crop_top or crop_right or crop_bottom
if crop:
url.append('%sx%s:%sx%s' % (
crop_left,
crop_top,
crop_right,
crop_bottom
))
if fit_in:
fit_ops = []
if adaptive:
fit_ops.append('adaptive')
if full:
fit_ops.append('full')
fit_ops.append('fit-in')
url.append('-'.join(fit_ops))
if horizontal_flip:
width = '-%s' % width
if vertical_flip:
height = '-%s' % height
if width or height:
url.append('%sx%s' % (width, height))
if halign != 'center':
url.append(halign)
if valign != 'middle':
url.append(valign)
if smart:
url.append('smart')
if filters:
url.append('filters:%s' % filters)
return '/'.join(url)
@classmethod
def encode_url(kls, url):
return quote(url, '/:?%=&()~",\'$')
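# Illustrative example (not part of the original file): generate_options builds
# the options path that regex()/parse_decrypted can parse back, e.g.
#
#     Url.generate_options(width=300, height=200, smart=True)
#     # -> '300x200/smart'
#
#     Url.parse_decrypted('/300x200/smart/some/image.jpg')['width']
#     # -> 300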
| 31.117647 | 119 | 0.479017 | 5,001 | 0.945369 | 0 | 0 | 4,201 | 0.79414 | 0 | 0 | 1,416 | 0.267675 |
733f199984f8b993cc317085788a45b7c591ad8b | 1,084 | py | Python | Projects/herdimmunity/Person.py | Tech-at-DU/ACS-1111.1-Object-Oriented-Programming | e4405678282b7fbd78a3b8337116b61083417343 | [
"MIT"
]
| null | null | null | Projects/herdimmunity/Person.py | Tech-at-DU/ACS-1111.1-Object-Oriented-Programming | e4405678282b7fbd78a3b8337116b61083417343 | [
"MIT"
]
| null | null | null | Projects/herdimmunity/Person.py | Tech-at-DU/ACS-1111.1-Object-Oriented-Programming | e4405678282b7fbd78a3b8337116b61083417343 | [
"MIT"
]
| 1 | 2021-11-01T21:04:52.000Z | 2021-11-01T21:04:52.000Z | import random
from Virus import Virus
class Person:
''' The simulation will contain people who will make up a population.'''
def __init__(self, is_vaccinated, infection=None):
''' We start out with is_alive = True
All other values will be set by the simulation through the parameters when
it instantiates each Person object.
'''
self.is_alive = True #boolean
self.is_vaccinated = is_vaccinated #boolean
self.infection = infection #virus object
def did_survive_infection(self):
''' Generate a random number between 0.0 and 1.0 and compare to the virus's
mortality_num. If the random number is smaller, person dies from the disease.
Set the person's is alive attribute to False If Person survives, they become
vaccinated and they have no infection (set the vaccinated attibute to True
and the infection to None) Return True if they survived the infection and
False if they did not.
'''
#TODO: finish this method
pass
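        # A possible implementation sketch based on the docstring above
        # (illustrative only; it assumes the Virus object exposes
        # `mortality_num`):
        #
        #     if random.uniform(0.0, 1.0) < self.infection.mortality_num:
        #         self.is_alive = False
        #         return False
        #     self.is_vaccinated = True
        #     self.infection = None
        #     return True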
| 37.37931 | 86 | 0.665129 | 1,035 | 0.954797 | 0 | 0 | 0 | 0 | 0 | 0 | 763 | 0.703875 |
733fd1e0e78df5be2052554568f783ebcd9a6ad0 | 1,741 | py | Python | Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py | ramonvaleriano/python- | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | [
"MIT"
]
| null | null | null | Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py | ramonvaleriano/python- | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | [
"MIT"
]
| null | null | null | Cursos/Alura/Python3_Avancando_na_orientacao_a_objetos/models_playlist3.py | ramonvaleriano/python- | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | [
"MIT"
]
| null | null | null | class Programa:
def __init__(self, nome, ano):
self._nome = nome.title()
self.ano = ano
self._likes = 0
@property
def likes(self):
return self._likes
def dar_like(self):
self._likes += 1
@property
def nome(self):
return self._nome
@nome.setter
def nome(self, novo_nome):
self._nome = novo_nome.title()
def __str__(self):
return f'{self.nome} - {self.ano} - {self.likes}'
class Filme(Programa):
def __init__(self, nome, ano, duracao):
super().__init__(nome, ano)
self.duracao = duracao
def __str__(self):
return f'{self.nome} - {self.ano} - {self.duracao} min - {self.likes}'
class Serie(Programa):
def __init__(self, nome, ano, temporadas):
super(Serie, self).__init__(nome, ano)
self.temporadas = temporadas
def __str__(self):
return f'{self.nome} - {self.ano} - {self.temporadas} temporadas - {self.likes}'
class Playlist:
def __init__(self, nome, programas):
self.nome = nome.title()
self._programas = programas
def __getitem__(self, item):
return self._programas[item]
@property
def listagem(self):
return self._programas
def __len__(self):
return len(self._programas)
vingadores = Filme('Vigadores - Guerra Infinita', 2018, 160)
atlanta = Serie('Atlatan', 2018, 2)
tmep = Filme('Todo mundo em pânico', 1999, 100)
demolidor = Serie('Demolidor', 2016, 2)
filmes_e_series = [vingadores, atlanta, demolidor, tmep]
playlist_fim_de_semana = Playlist('fim de semana', filmes_e_series)
print(f'Tamonho do playlist: {len(playlist_fim_de_semana)}')
for dados in playlist_fim_de_semana:
print(dados) | 24.871429 | 88 | 0.638713 | 1,303 | 0.747991 | 0 | 0 | 258 | 0.148106 | 0 | 0 | 318 | 0.182549 |
73401ec9a9c7c85f7251558930f267232a9f7bb1 | 3,275 | py | Python | blackjack/game.py | cuiqui/blackjack | 5ecb0ae1c065fa284c2209222f6f958e1f514249 | [
"MIT"
]
| null | null | null | blackjack/game.py | cuiqui/blackjack | 5ecb0ae1c065fa284c2209222f6f958e1f514249 | [
"MIT"
]
| null | null | null | blackjack/game.py | cuiqui/blackjack | 5ecb0ae1c065fa284c2209222f6f958e1f514249 | [
"MIT"
]
| null | null | null | import constants as c
from deck import Deck
from player import Human, RandomAI
class Game:
def __init__(self):
self.deck = None
self.players = None
self.scores = None
self.rounds_left = None
self.game_over = False
def new(self):
self.game_over = False
self.rounds_left = c.ROUNDS
self.players = [Human(), RandomAI()]
self.scores = {str(k): 0 for k in self.players}
self.new_round()
def new_round(self):
self.deck = Deck()
self.deck.shuffle()
for player in self.players:
player.hand = []
self.deal(player=player, quantity=c.INITIAL_HAND)
def deal(self, player, quantity=1):
for card in self.deck.draw(quantity):
player.hand.append(card)
def turn(self, player):
score = None
action = player.play()
if action == 'hit':
self.deal(player)
if player.get_score() > c.POINTS:
score = 0
elif action == 'stay':
score = player.get_score()
return score
def balance(self, scores):
print('----- Scores -----')
print(f'Round scores (points made in round): {scores}')
tie = True
winner = scores.popitem()
for k, v in scores.items():
if v > winner[1]:
winner = (k, v)
tie = False
elif v < winner[1]:
tie = False
if not tie:
self.scores[winner[0]] += 1
print(f'General scores (rounds won by each): {self.scores}')
def run(self):
# while there are still rounds left
while self.rounds_left:
# set round scores to empty
scores = {}
# for each player, do a whole turn, which can involve
# multiple actions, i.e., two or more "hits"
for player in self.players:
print(f'---- {str(player)} turn ----')
# turn is not over until we receive a score,
# whether it's 0, which means it overstepped
# or 0 < x <= 21
turn_over = False
while not turn_over:
# do a turn until we get a score, if we don't
# have a score, that means that the engine
# "hit" and didn't overstepped, so it's still
# its turn.
score = self.turn(player)
if score is not None:
print(f'Hand: {[str(e) for e in player.hand]}, points: {player.get_score()}')
# store scores for this player in this round
# and hand control over
scores[str(player)] = score
turn_over = True
# do a balance after finishing round
self.balance(scores)
# begin new round and reduce rounds left by 1
self.new_round()
self.rounds_left -= 1
print(f'Rounds left: {self.rounds_left}')
if __name__ == '__main__':
g = Game()
g.new()
g.run()
| 31.490385 | 102 | 0.487328 | 3,113 | 0.950534 | 0 | 0 | 0 | 0 | 0 | 0 | 846 | 0.258321 |
73407d37b530e40b65a5d94f1bc5d3086355dead | 1,084 | py | Python | numba/tests/__init__.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
]
| 1 | 2019-12-04T07:13:18.000Z | 2019-12-04T07:13:18.000Z | numba/tests/__init__.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
]
| null | null | null | numba/tests/__init__.py | mawanda-jun/numba | 8c6658375c1f8fe50e1a5ccd11d4e7bf5a8053de | [
"BSD-2-Clause",
"Apache-2.0"
]
| 1 | 2020-09-18T15:03:46.000Z | 2020-09-18T15:03:46.000Z | from numba import unittest_support as unittest
import gc
from os.path import dirname, join
import multiprocessing
import sys
import time
import warnings
from unittest.suite import TestSuite
from numba.testing import load_testsuite
from numba.testing import ddt # for backward compatibility
try:
import faulthandler
except ImportError:
faulthandler = None
else:
try:
# May fail in IPython Notebook with UnsupportedOperation
faulthandler.enable()
except Exception as e:
msg = "Failed to enable faulthandler due to:\n{err}"
warnings.warn(msg.format(err=e))
def load_tests(loader, tests, pattern):
suite = TestSuite()
suite.addTests(load_testsuite(loader, dirname(__file__)))
# Numba CUDA tests are located in a separate directory:
cuda_dir = join(dirname(dirname(__file__)), 'cuda/tests')
suite.addTests(loader.discover(cuda_dir))
# Numba ROC tests are located in a separate directory
roc_dir = join(dirname(dirname(__file__)), 'roc/tests')
suite.addTests(loader.discover(roc_dir))
return suite
| 27.1 | 64 | 0.737085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.240775 |
7340e2ed735c34bf4441bf796759a517ee89ee90 | 5,377 | py | Python | src/clustar/fit.py | clustar/Clustar | 83e155feffc10c4bf172f8ec769fb3c5ffe1d579 | [
"MIT"
]
| 4 | 2021-02-24T17:27:25.000Z | 2021-06-28T04:45:32.000Z | src/clustar/fit.py | clustar/Clustar | 83e155feffc10c4bf172f8ec769fb3c5ffe1d579 | [
"MIT"
]
| 3 | 2021-04-05T14:53:26.000Z | 2021-06-27T20:17:14.000Z | src/clustar/fit.py | clustar/Clustar | 83e155feffc10c4bf172f8ec769fb3c5ffe1d579 | [
"MIT"
]
| 1 | 2021-02-15T16:13:05.000Z | 2021-02-15T16:13:05.000Z | """
Clustar module for fitting-related methods.
This module is designed for the 'ClustarData' object. All listed methods take
an input parameter of a 'ClustarData' object and return a 'ClustarData' object
after processing the method. As a result, all changes are localized within the
'ClustarData' object.
Visit <https://clustar.github.io/> for additional information.
"""
from clustar import graph
from scipy import ndimage, stats
from shapely import affinity, geometry
import numpy as np
def compute_fit(cd):
"""
Computes the normalized bivariate gaussian fit for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
i = 0
while i < len(cd.groups):
group = cd.groups[i]
try:
rv = stats.multivariate_normal([group.stats.x_bar,
group.stats.y_bar],
group.stats.covariance_matrix)
except ValueError:
del cd.groups[i]
continue
bvg = rv.pdf(group.image.pos)
bvg *= np.max(group.image.data) / np.max(bvg)
group.res.data = 1 - (bvg / group.image.data)
group.fit.bvg = bvg
group.fit.rv = rv
i += 1
return cd
def compute_ellipse(cd):
"""
Computes the ellipse parameters and localized residuals for the 'Group'
objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
a = group.stats.x_len / 2
b = group.stats.y_len / 2
theta = np.linspace(0, np.pi * 2, 360)
r = a * b / np.sqrt((b * np.cos(theta)) ** 2 +
(a * np.sin(theta)) ** 2)
xy = np.stack([group.stats.x_bar + r * np.cos(theta),
group.stats.y_bar + r * np.sin(theta)], 1)
ellipse = affinity.rotate(geometry.Polygon(xy),
group.stats.degrees,
(group.stats.x_bar, group.stats.y_bar))
pos = np.array([[i, j] for i in range(group.image.data.shape[0])
for j in range(group.image.data.shape[1])])
inside = np.array([p for p in pos
if ellipse.contains(geometry.Point(p))])
outside = np.array([p for p in pos
if not ellipse.contains(geometry.Point(p))])
group.fit.ellipse = ellipse
group.res.pos = pos
group.res.inside = inside
group.res.outside = outside
return cd
def compute_metrics(cd):
"""
Computes the evaluation metrics for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = group.res
output = np.abs(res.data[res.inside[:, 0], res.inside[:, 1]])
output[output < 0] = 0
output[output > 1] = 1
bias = group.image.data[res.inside[:, 0], res.inside[:, 1]]
group.metrics.standard_deviation = np.std(output)
group.metrics.variance = group.metrics.standard_deviation ** 2
group.metrics.average = np.mean(output)
group.metrics.weighted_average = np.average(output, weights=bias)
group.res.output = output
return cd
def compute_peaks(cd):
"""
Computes the number of peaks along the major and minor axes for the
'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = np.array(group.res.data, copy=True)
res_out = group.res.outside
res[res_out[:, 0], res_out[:, 1]] = 0
r_major = np.abs(ndimage.rotate(res, group.stats.degrees))
r_minor = np.abs(ndimage.rotate(res, group.stats.degrees + 90))
major_idx = graph.critical_points(r_major)
minor_idx = graph.critical_points(r_minor)
major_idx = [major_idx[i] for i in range(len(major_idx))
if i % 2 == 0]
minor_idx = [minor_idx[i] for i in range(len(minor_idx))
if i % 2 == 0]
group.fit.major_peaks = len(major_idx)
group.fit.minor_peaks = len(minor_idx)
group.res.clean = res
return cd
def validate(cd):
"""
Determines which 'Group' objects are flagged for manual review by using
the specified validation parameters.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
attribute = cd.params.metric.lower()
threshold = cd.params.threshold
for group in cd.groups:
metric = getattr(group.metrics, attribute)
if metric > threshold:
group.flag = True
cd.flag = True
if cd.params.evaluate_peaks and \
((group.fit.major_peaks in [2, 4]) or
(group.fit.minor_peaks in [2, 4])):
group.flag = False
cd.flag = False
return cd
| 29.382514 | 78 | 0.563139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,641 | 0.305189 |
73411a436e5f2edebb124c6122419fcdeef298b3 | 970 | py | Python | tests/wizard/namedwizardtests/urls.py | felixxm/django-formtools | ba62c6fa14edbd4197bda8ed0d23eb006ebebeba | [
"BSD-3-Clause"
]
| null | null | null | tests/wizard/namedwizardtests/urls.py | felixxm/django-formtools | ba62c6fa14edbd4197bda8ed0d23eb006ebebeba | [
"BSD-3-Clause"
]
| null | null | null | tests/wizard/namedwizardtests/urls.py | felixxm/django-formtools | ba62c6fa14edbd4197bda8ed0d23eb006ebebeba | [
"BSD-3-Clause"
]
| 1 | 2019-11-04T22:52:19.000Z | 2019-11-04T22:52:19.000Z | from django.conf.urls import url
from .forms import (
CookieContactWizard, Page1, Page2, Page3, Page4, SessionContactWizard,
)
def get_named_session_wizard():
return SessionContactWizard.as_view(
[('form1', Page1), ('form2', Page2), ('form3', Page3), ('form4', Page4)],
url_name='nwiz_session',
done_step_name='nwiz_session_done'
)
def get_named_cookie_wizard():
return CookieContactWizard.as_view(
[('form1', Page1), ('form2', Page2), ('form3', Page3), ('form4', Page4)],
url_name='nwiz_cookie',
done_step_name='nwiz_cookie_done'
)
urlpatterns = [
url(r'^nwiz_session/(?P<step>.+)/$', get_named_session_wizard(), name='nwiz_session'),
url(r'^nwiz_session/$', get_named_session_wizard(), name='nwiz_session_start'),
url(r'^nwiz_cookie/(?P<step>.+)/$', get_named_cookie_wizard(), name='nwiz_cookie'),
url(r'^nwiz_cookie/$', get_named_cookie_wizard(), name='nwiz_cookie_start'),
]
| 32.333333 | 90 | 0.670103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.290722 |
73418fc41479ed48faa479be47ae0461c5d41885 | 907 | py | Python | setup.py | ajayp10/derive_event_pm4py | d1fd16c65081348b617dc0697b372a294c91023a | [
"MIT"
]
| null | null | null | setup.py | ajayp10/derive_event_pm4py | d1fd16c65081348b617dc0697b372a294c91023a | [
"MIT"
]
| null | null | null | setup.py | ajayp10/derive_event_pm4py | d1fd16c65081348b617dc0697b372a294c91023a | [
"MIT"
]
| null | null | null | import pathlib
from setuptools import setup
CURRENT_PATH = pathlib.Path(__file__).parent
README = (CURRENT_PATH/"README.md").read_text()
setup(
name="derive_event_pm4py",
version="1.0.1",
description="It derives new events based on rules provided as inputs.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/ajayp10/derive_event_pm4py",
author="Ajay Pandi",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["derive_event"],
include_package_data=True,
install_requires=['pandas', 'numpy', 'pm4py',
],
entry_points={
"console_scripts": [
"derive=derive_event.derive:main",
]
},
) | 28.34375 | 75 | 0.651599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.44763 |
73425bf1b2ce90f77e267345bd3b090b0208b790 | 16,334 | py | Python | tests/service/ai/test_not_killing_itself_ai.py | jonashellmann/informaticup21-team-chillow | f2e519af0a5d9a9368d62556703cfb1066ebb58f | [
"MIT"
]
| 3 | 2021-01-17T23:32:07.000Z | 2022-01-30T14:49:16.000Z | tests/service/ai/test_not_killing_itself_ai.py | jonashellmann/informaticup21-team-chillow | f2e519af0a5d9a9368d62556703cfb1066ebb58f | [
"MIT"
]
| 2 | 2021-01-17T13:37:56.000Z | 2021-04-14T12:28:49.000Z | tests/service/ai/test_not_killing_itself_ai.py | jonashellmann/informaticup21-team-chillow | f2e519af0a5d9a9368d62556703cfb1066ebb58f | [
"MIT"
]
| 2 | 2021-04-02T14:53:38.000Z | 2021-04-20T11:10:17.000Z | import unittest
from datetime import datetime, timezone
from typing import List
from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class NotKillingItselfAITest(unittest.TestCase):
def test_ai_should_choose_the_own_non_killing_itself_action(self):
player1 = Player(1, 0, 0, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself(self):
player1 = Player(1, 0, 1, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself2(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_in_turn_6(self):
player1 = Player(1, 0, 4, Direction.up, 3, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
game_service.turn.turn_ctr = 6
sut = NotKillingItselfAI(player1, [], 4, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(Action.speed_up in actions)
self.assertTrue(len(actions) == 3)
def test_ai_should_not_choose_speed_up_if_max_speed_is_allready_reached(self):
MAX_SPEED = 3
player1 = Player(1, 0, 4, Direction.up, MAX_SPEED, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], MAX_SPEED, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_calc_action_with_max_distance(self):
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_calc_all_action_with_max_distance_with_max_worse_distance(self):
MAX_WORSE_DISTANCE = 1
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, MAX_WORSE_DISTANCE, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.speed_up in actions)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 3)
def test_get_information(self):
player = Player(1, 0, 4, Direction.up, 1, True, "")
sut = NotKillingItselfAI(player, [], 3, 1, 3)
expected = "max_speed=3, max_worse_distance=1, depth=3"
result = sut.get_information()
self.assertEqual(expected, result)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_with_depth_greater_than_one(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_greater_than_one_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_correct_list_with_depth_three_and_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_three_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_best_list_of_actions_by_depth_from_lower_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_by_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_in_lowest_possible_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
| 52.185304 | 120 | 0.53355 | 15,927 | 0.975083 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.006245 |
73441d4a3b24e3d3313825da48a3c91f2e8b65de | 1,123 | py | Python | setup.py | meisanggou/ldapuser | 45a9e5eba8bbf173ce2ec87f9a32cff8db549e7c | [
"MIT"
]
| null | null | null | setup.py | meisanggou/ldapuser | 45a9e5eba8bbf173ce2ec87f9a32cff8db549e7c | [
"MIT"
]
| null | null | null | setup.py | meisanggou/ldapuser | 45a9e5eba8bbf173ce2ec87f9a32cff8db549e7c | [
"MIT"
]
| null | null | null | #! /usr/bin/env python
# coding: utf-8
# __author__ = 'meisanggou'
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
if sys.version_info <= (2, 7):
sys.stderr.write("ERROR: ldap-user requires Python Version 2.7 or above.\n")
sys.stderr.write("Your Python Version is %s.%s.%s.\n" % sys.version_info[:3])
sys.exit(1)
name = "ldap-user"
version = "0.5"
url = "https://github.com/meisanggou/ldapuser"
license = "MIT"
author = "meisanggou"
short_description = "use ldap verify user"
long_description = """use ldap verify user"""
keywords = "ldap-user"
install_requires = ["python-ldap", "six"]
entry_points = {'console_scripts': [
'jy-ldap-config=ldap_user.cli:create_config'
]}
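# Illustrative usage of the console entry point above (assuming a standard
# install from PyPI):
#   $ pip install ldap-user
#   $ jy-ldap-config   # dispatches to ldap_user.cli:create_config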
setup(name=name,
version=version,
author=author,
author_email="[email protected]",
url=url,
packages=["ldap_user", "ldap_user/util"],
license=license,
description=short_description,
long_description=long_description,
keywords=keywords,
install_requires=install_requires,
entry_points=entry_points
)
| 25.522727 | 81 | 0.685663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.369546 |
73455aa40d8fdaf8fad425f0bc60becf47571215 | 4,387 | py | Python | tests/test_infection.py | chinapnr/covid-19-data | 409fa260c16e09b7ef820435c5086207bb5e40ef | [
"MIT"
]
| 3 | 2020-05-27T01:21:50.000Z | 2020-08-20T07:54:42.000Z | tests/test_infection.py | chinapnr/covid-19-data | 409fa260c16e09b7ef820435c5086207bb5e40ef | [
"MIT"
]
| 24 | 2020-03-26T10:45:34.000Z | 2020-04-06T06:13:50.000Z | tests/test_infection.py | chinapnr/covid-19-data | 409fa260c16e09b7ef820435c5086207bb5e40ef | [
"MIT"
]
| null | null | null | import json
import pytest
@pytest.mark.usefixtures('client', 'headers')
class TestInfection:
def test_infection_region_tc01(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'false'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc02(self, client, headers):
# db has no data BETWEEN 2020-03-25 2020-03-26
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-25',
'end_date': '2020-03-26',
'include_hmt': 'false'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc03(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
# look up detail
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc04(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
# look up detail
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
# 'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_region_tc05(self, client, headers):
# db has data BETWEEN 2020-03-22 2020-03-24
# look up detail
region = 'China'
payload = {
'region': region,
'start_date': '2020-01-22',
# 'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region', params=payload, headers=headers)
assert response.status_code == 400
print("response: ", response.text)
response_data = json.loads(response.text)['code']
assert response_data == "30018"
def test_infection_region_detail(self, client, headers):
region = 'China'
payload = {
'region': region,
'start_date': '2020-03-22',
'end_date': '2020-03-24',
'include_hmt': 'true'
}
response = client.get('/infection/region/detail', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
@pytest.mark.skip
def test_infection_area(self, client, headers):
region = 'China'
area = 'Chongqing'
payload = {
'region': region,
'area': area,
'start_date': '2020-03-22',
'end_date': '2020-03-24'
}
response = client.get('/infection/area', params=payload, headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
def test_infection_global(self, client, headers):
response = client.get('/infection/global', headers=headers)
assert response.status_code == 200
print("response: ", response.text)
response_data = json.loads(response.text)['data']
assert response_data
| 35.096 | 90 | 0.581947 | 4,311 | 0.982676 | 0 | 0 | 4,357 | 0.993162 | 0 | 0 | 1,161 | 0.264646 |
734586c386b99571285203cdfc4477ce123175a7 | 19,442 | py | Python | tests/test_util_owsutil.py | TimFranken/pydov | da393129207c93ab845a28525864c13374459dbf | [
"MIT"
]
| null | null | null | tests/test_util_owsutil.py | TimFranken/pydov | da393129207c93ab845a28525864c13374459dbf | [
"MIT"
]
| null | null | null | tests/test_util_owsutil.py | TimFranken/pydov | da393129207c93ab845a28525864c13374459dbf | [
"MIT"
]
| null | null | null | """Module grouping tests for the pydov.util.owsutil module."""
import copy
import re
import pytest
from numpy.compat import unicode
from owslib.etree import etree
from owslib.fes import (
PropertyIsEqualTo,
FilterRequest,
)
from owslib.iso import MD_Metadata
from owslib.util import nspath_eval
from pydov.util import owsutil
from pydov.util.errors import (
MetadataNotFoundError,
FeatureCatalogueNotFoundError,
)
from pydov.util.location import (
Within,
Box,
)
from tests.test_search_boring import (
md_metadata,
mp_remote_md,
mp_remote_describefeaturetype,
mp_remote_fc,
location_md_metadata,
location_fc_featurecatalogue,
location_wfs_describefeaturetype,
)
from tests.test_search import (
wfs,
mp_wfs,
mp_remote_fc_notfound
)
def clean_xml(xml):
"""Clean the given XML string of namespace definition, namespace
prefixes and syntactical but otherwise meaningless differences.
Parameters
----------
xml : str
String representation of XML document.
Returns
-------
str
String representation of cleaned XML document.
"""
# remove xmlns namespace definitions
r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml)
# remove namespace prefixes in tags
r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r)
# remove extra spaces in tags
r = re.sub(r'[ ]+/>', '/>', r)
# remove extra spaces between tags
r = re.sub(r'>[ ]+<', '><', r)
return r
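# A minimal sketch of what clean_xml normalises (the XML snippets are assumptions,
# not taken from the pydov fixtures): both serialisations below reduce to the same
# string, so they compare equal in the assertions further down.
#
#   clean_xml('<wfs:Query xmlns:wfs="http://www.opengis.net/wfs">'
#             '<wfs:PropertyName>fiche</wfs:PropertyName></wfs:Query>')
#   == clean_xml('<Query><PropertyName>fiche</PropertyName></Query>')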
class TestOwsutil(object):
"""Class grouping tests for the pydov.util.owsutil module."""
def test_get_csw_base_url(self, wfs):
"""Test the owsutil.get_csw_base_url method.
Test whether the CSW base URL of the dov-pub:Boringen layer is correct.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
contentmetadata = wfs.contents['dov-pub:Boringen']
assert owsutil.get_csw_base_url(contentmetadata) == \
'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw'
def test_get_csw_base_url_nometadataurls(self, wfs):
"""Test the owsutil.get_csw_base_url method for a layer without
        metadata urls.
Test whether a MetadataNotFoundError is raised.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
contents = copy.deepcopy(wfs.contents)
contentmetadata = contents['dov-pub:Boringen']
contentmetadata.metadataUrls = []
with pytest.raises(MetadataNotFoundError):
owsutil.get_csw_base_url(contentmetadata)
def test_get_featurecatalogue_uuid(self, md_metadata):
"""Test the owsutil.get_featurecatalogue_uuid method.
Test whether the featurecatalogue uuid of the dov-pub:Boringen layer
is correct.
Parameters
----------
md_metadata : pytest.fixture providing owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
assert owsutil.get_featurecatalogue_uuid(md_metadata) == \
'c0cbd397-520f-4ee1-aca7-d70e271eeed6'
def test_get_featurecatalogue_uuid_nocontentinfo(self, md_metadata):
"""Test the owsutil.get_featurecatalogue_uuid method when the
metadata is missing the gmd:contentInfo element.
Test whether a FeatureCatalogueNotFoundError is raised.
Parameters
----------
md_metadata : pytest.fixture providing owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
tree = etree.fromstring(md_metadata.xml)
root = tree.find('{http://www.isotc211.org/2005/gmd}MD_Metadata')
for ci in tree.findall(
'.//{http://www.isotc211.org/2005/gmd}contentInfo'):
root.remove(ci)
md_metadata.xml = etree.tostring(tree)
with pytest.raises(FeatureCatalogueNotFoundError):
owsutil.get_featurecatalogue_uuid(md_metadata)
def test_get_featurecatalogue_uuid_nouuidref(self, md_metadata):
"""Test the owsutil.get_featurecatalogue_uuid method when the
gmd:contentInfo element is missing a 'uuidref' attribute.
Test whether a FeatureCatalogueNotFoundError is raised.
Parameters
----------
md_metadata : pytest.fixture providing owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
tree = etree.fromstring(md_metadata.xml)
for ci in tree.findall(nspath_eval(
'gmd:MD_Metadata/gmd:contentInfo/'
'gmd:MD_FeatureCatalogueDescription/'
'gmd:featureCatalogueCitation',
{'gmd': 'http://www.isotc211.org/2005/gmd'})):
ci.attrib.pop('uuidref')
md_metadata.xml = etree.tostring(tree)
with pytest.raises(FeatureCatalogueNotFoundError):
owsutil.get_featurecatalogue_uuid(md_metadata)
def test_get_namespace(self, wfs, mp_remote_describefeaturetype):
"""Test the owsutil.get_namespace method.
Test whether the namespace of the dov-pub:Boringen layer is correct.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType of the
dov-pub:Boringen layer.
"""
assert owsutil.get_namespace(wfs, 'dov-pub:Boringen') == \
'http://dov.vlaanderen.be/ocdov/dov-pub'
def test_get_remote_featurecatalogue(self, mp_remote_fc):
"""Test the owsutil.get_remote_featurecatalogue method.
Test whether the feature catalogue of the dov-pub:Boringen layer
matches the format described in the docs.
Parameters
----------
mp_remote_fc : pytest.fixture
Monkeypatch the call to get the remote feature catalogue of the
dov-pub:Boringen layer.
"""
fc = owsutil.get_remote_featurecatalogue(
'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw',
'c0cbd397-520f-4ee1-aca7-d70e271eeed6')
assert type(fc) is dict
assert 'definition' in fc
assert type(fc['definition']) in (str, unicode)
assert 'attributes' in fc
assert type(fc['attributes']) is dict
attrs = fc['attributes']
if len(attrs) > 0:
for attr in attrs.values():
assert type(attr) is dict
assert 'definition' in attr
assert type(attr['definition']) in (str, unicode)
assert 'values' in attr
assert type(attr['values']) is list
if len(attr['values']) > 0:
for v in attr['values']:
assert type(v) in (str, unicode)
assert len(attr['values']) == len(set(attr['values']))
assert 'multiplicity' in attr
mp = attr['multiplicity']
assert type(mp) is tuple
assert len(mp) == 2
assert mp[0] in (0, 1)
assert (type(mp[1]) is int and mp[1] > 0) or mp[1] == 'Inf'
def test_get_remote_featurecataloge_baduuid(self, mp_remote_fc_notfound):
"""Test the owsutil.get_remote_featurecatalogue method with an
inexistent feature catalogue uuid.
Test whether a FeatureCatalogueNotFoundError is raised.
Parameters
----------
mp_remote_fc_notfound : pytest.fixture
Monkeypatch the call to get an inexistent remote featurecatalogue.
"""
with pytest.raises(FeatureCatalogueNotFoundError):
owsutil.get_remote_featurecatalogue(
'https://www.dov.vlaanderen.be/geonetwork/srv/nl/csw',
'badfc000-0000-0000-0000-badfc00badfc')
def test_get_remote_metadata(self, md_metadata):
"""Test the owsutil.get_remote_metadata method.
Test whether the resulting MD_Metadata is correct.
Parameters
----------
md_metadata : pytest.fixture returning owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
assert type(md_metadata) is MD_Metadata
def test_get_remote_metadata_nometadataurls(self, wfs):
"""Test the owsutil.get_remote_metadata method when the WFS layer
        is missing metadata URLs.
Test whether a MetadataNotFoundError is raised.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
contents = copy.deepcopy(wfs.contents)
contentmetadata = contents['dov-pub:Boringen']
contentmetadata.metadataUrls = []
with pytest.raises(MetadataNotFoundError):
owsutil.get_remote_metadata(contentmetadata)
def test_wfs_build_getfeature_request_onlytypename(self):
"""Test the owsutil.wfs_build_getfeature_request method with only a
typename specified.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
xml = owsutil.wfs_build_getfeature_request('dov-pub:Boringen')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query '
'typeName="dov-pub:Boringen"><ogc:Filter '
'xmlns:ogc="http://www.opengis.net/ogc"/></wfs:Query></wfs'
':GetFeature>')
def test_wfs_build_getfeature_request_bbox_nogeometrycolumn(self):
"""Test the owsutil.wfs_build_getfeature_request method with a location
argument but without the geometry_column argument.
Test whether an AttributeError is raised.
"""
with pytest.raises(AttributeError):
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen',
location=Within(Box(151650, 214675, 151750, 214775)))
def test_wfs_build_getfeature_request_bbox(self):
"""Test the owsutil.wfs_build_getfeature_request method with a
typename, box and geometry_column.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen',
location=Within(Box(151650, 214675, 151750, 214775)),
geometry_column='geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"><wfs:Query '
'typeName="dov-pub:Boringen"><ogc:Filter '
'xmlns:ogc="http://www.opengis.net/ogc"><ogc:Within> '
'<ogc:PropertyName>geom</ogc:PropertyName><gml:Envelope '
'xmlns:gml="http://www.opengis.net/gml" srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
':lowerCorner>151650.000 '
'214675.000</gml:lowerCorner><gml:upperCorner>151750.000 '
'214775.000</gml:upperCorner></gml:Envelope></ogc:Within></ogc'
':Filter></wfs:Query></wfs:GetFeature>')
def test_wfs_build_getfeature_request_propertyname(self):
"""Test the owsutil.wfs_build_getfeature_request method with a list
of propertynames.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', propertyname=['fiche', 'diepte_tot_m'])
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> '
'<wfs:PropertyName>fiche</wfs:PropertyName> '
'<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter/> '
'</wfs:Query> </wfs:GetFeature>')
def test_wfs_build_getfeature_request_filter(self):
"""Test the owsutil.wfs_build_getfeature_request method with an
attribute filter.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Herstappe')
filter_request = FilterRequest()
filter_request = filter_request.setConstraint(query)
try:
filter_request = etree.tostring(filter_request,
encoding='unicode')
except LookupError:
# Python2.7 without lxml uses 'utf-8' instead.
filter_request = etree.tostring(filter_request,
encoding='utf-8')
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', filter=filter_request)
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> <ogc:Filter> '
'<ogc:PropertyIsEqualTo> '
'<ogc:PropertyName>gemeente</ogc:PropertyName> '
'<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
'</ogc:Filter> </wfs:Query> </wfs:GetFeature>')
def test_wfs_build_getfeature_request_bbox_filter(self):
"""Test the owsutil.wfs_build_getfeature_request method with an
attribute filter, a box and a geometry_column.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Herstappe')
filter_request = FilterRequest()
filter_request = filter_request.setConstraint(query)
try:
filter_request = etree.tostring(filter_request,
encoding='unicode')
except LookupError:
# Python2.7 without lxml uses 'utf-8' instead.
filter_request = etree.tostring(filter_request,
encoding='utf-8')
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', filter=filter_request,
location=Within(Box(151650, 214675, 151750, 214775)),
geometry_column='geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> <ogc:Filter> <ogc:And> '
'<ogc:PropertyIsEqualTo> '
'<ogc:PropertyName>gemeente</ogc:PropertyName> '
'<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
'<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> '
'<gml:Envelope xmlns:gml="http://www.opengis.net/gml" '
'srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> '
'<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> '
'<gml:upperCorner>151750.000 214775.000</gml:upperCorner> '
'</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> '
'</wfs:Query> </wfs:GetFeature>')
def test_wfs_build_getfeature_request_bbox_filter_propertyname(self):
"""Test the owsutil.wfs_build_getfeature_request method with an
attribute filter, a box, a geometry_column and a list of
propertynames.
Test whether the XML of the WFS GetFeature call is generated correctly.
"""
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Herstappe')
filter_request = FilterRequest()
filter_request = filter_request.setConstraint(query)
try:
filter_request = etree.tostring(filter_request,
encoding='unicode')
except LookupError:
# Python2.7 without lxml uses 'utf-8' instead.
filter_request = etree.tostring(filter_request,
encoding='utf-8')
xml = owsutil.wfs_build_getfeature_request(
'dov-pub:Boringen', filter=filter_request,
location=Within(Box(151650, 214675, 151750, 214775)),
geometry_column='geom', propertyname=['fiche', 'diepte_tot_m'])
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'service="WFS" version="1.1.0" '
'xsi:schemaLocation="http://www.opengis.net/wfs '
'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd"> <wfs:Query '
'typeName="dov-pub:Boringen"> '
'<wfs:PropertyName>fiche</wfs:PropertyName> '
'<wfs:PropertyName>diepte_tot_m</wfs:PropertyName> <ogc:Filter> '
'<ogc:And> <ogc:PropertyIsEqualTo> '
'<ogc:PropertyName>gemeente</ogc:PropertyName> '
'<ogc:Literal>Herstappe</ogc:Literal> </ogc:PropertyIsEqualTo> '
'<ogc:Within> <ogc:PropertyName>geom</ogc:PropertyName> '
'<gml:Envelope xmlns:gml="http://www.opengis.net/gml" '
'srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"> '
'<gml:lowerCorner>151650.000 214675.000</gml:lowerCorner> '
'<gml:upperCorner>151750.000 214775.000</gml:upperCorner> '
'</gml:Envelope> </ogc:Within> </ogc:And> </ogc:Filter> '
'</wfs:Query> </wfs:GetFeature>')
| 40.419958 | 79 | 0.620152 | 17,939 | 0.922693 | 0 | 0 | 0 | 0 | 0 | 0 | 10,898 | 0.560539 |
7347c43851f55966f151bfafefba0299301f676e | 1,430 | py | Python | manage.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
]
| null | null | null | manage.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
]
| null | null | null | manage.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
manage
~~~~~~
Flask-Script Manager
"""
import os
from flask.ext.script import Manager
from flask.ext.migrate import MigrateCommand
from fbone import create_app
from fbone.extensions import db
from fbone.utils import PROJECT_PATH, MALE
from fbone.modules.user import User, ADMIN, ACTIVE
from fbone.modules.movies import Movie
from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand
app = create_app()
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config', required=False)
manager.add_command('create_user', CreateUserCommand())
manager.add_command('delete_user', DeleteUserCommand())
manager.add_command('list_users', ListUsersCommand())
manager.add_command('db', MigrateCommand)
@manager.command
def initdb():
"""Init/reset database."""
db.drop_all()
db.create_all()
admin = User(
name=u'admin',
fullname=u'Agador Spartacus',
email=u'[email protected]',
password=u'123456',
role_code=ADMIN,
status_code=ACTIVE,
gender_code=MALE,
bio=u'FSU Grad. Go Noles!')
db.session.add(admin)
db.session.commit()
@manager.command
def tests():
"""Run the tests."""
import pytest
exit_code = pytest.main([os.path.join(PROJECT_PATH, 'tests'), '--verbose'])
return exit_code
if __name__ == "__main__":
manager.run()
| 23.442623 | 94 | 0.692308 | 0 | 0 | 0 | 0 | 586 | 0.40979 | 0 | 0 | 294 | 0.205594 |
7347cda8008de4de3e2356287a34d7f4b9da7478 | 444 | py | Python | llvmsqlite_util/benchmarking/micro/aggregate.py | KowalskiThomas/LLVMSQLite | a36b85dfadf44b0a4008d9f01ebd79d5ca2cace4 | [
"blessing"
]
| null | null | null | llvmsqlite_util/benchmarking/micro/aggregate.py | KowalskiThomas/LLVMSQLite | a36b85dfadf44b0a4008d9f01ebd79d5ca2cace4 | [
"blessing"
]
| null | null | null | llvmsqlite_util/benchmarking/micro/aggregate.py | KowalskiThomas/LLVMSQLite | a36b85dfadf44b0a4008d9f01ebd79d5ca2cace4 | [
"blessing"
]
| null | null | null | import os
sql_files = [x for x in os.listdir(".") if x.endswith("sql")]
sql_files = list(sorted(sql_files, key = lambda x : int(x.split('.')[0])))
result = ""
for i, f in enumerate(sql_files):
i = i + 1
i = f.replace(".sql", "")
with open(f) as sql:
result += f"--- Query {i}\n"
result += sql.read().strip()
result += "\n\n\n"
result = result.strip()
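# Resulting layout of output.txt (illustrative; assumes inputs named 1.sql, 2.sql, ...):
#
#   --- Query 1
#   <contents of 1.sql>
#
#
#   --- Query 2
#   <contents of 2.sql>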
with open("output.txt", 'w') as f:
f.write(result) | 26.117647 | 74 | 0.554054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.13964 |
73486adc08d97e3620d3c9533949d0a3a23b6c00 | 2,882 | py | Python | demos/crane/main.py | Starli8ht/KivyMD | 5a3425d4e39e8615a0ba8b879db1eb5d7bfb3b49 | [
"MIT"
]
| null | null | null | demos/crane/main.py | Starli8ht/KivyMD | 5a3425d4e39e8615a0ba8b879db1eb5d7bfb3b49 | [
"MIT"
]
| null | null | null | demos/crane/main.py | Starli8ht/KivyMD | 5a3425d4e39e8615a0ba8b879db1eb5d7bfb3b49 | [
"MIT"
]
| null | null | null | """
MDCrane demo
=============
.. seealso::
`Material Design spec,
Crane <https://material.io/design/material-studies/crane.html#>`
Crane is a travel app that helps users find and book travel, lodging, and
restaurant options that match their input preferences.
"""
import os
import sys
from pathlib import Path
from kivy.lang import Builder
from kivymd.app import MDApp
if getattr(sys, "frozen", False): # bundle mode with PyInstaller
os.environ["CRANE_ROOT"] = sys._MEIPASS
else:
os.environ["CRANE_ROOT"] = str(Path(__file__).parent)
KV_DIR = f"{os.path.dirname(__file__)}/libs/kv/"
for kv_file in os.listdir(KV_DIR):
with open(os.path.join(KV_DIR, kv_file), encoding="utf-8") as kv:
Builder.load_string(kv.read())
KV = """
#:import FadeTransition kivy.uix.screenmanager.FadeTransition
#:import CraneRootScreen libs.baseclass.root_screen.CraneRootScreen
ScreenManager:
transition: FadeTransition()
CraneRootScreen:
name: "crane root screen"
"""
class MDCrane(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.title = "Crane"
self.icon = f"{os.environ['CRANE_ROOT']}/assets/images/logo.png"
self.theme_cls.primary_palette = "Gray"
self.theme_cls.primary_hue = "100"
def build(self):
FONT_PATH = f"{os.environ['CRANE_ROOT']}/assets/fonts/"
self.theme_cls.font_styles.update(
{
"H1": [FONT_PATH + "Raleway-Light", 96, False, -1.5],
"H2": [FONT_PATH + "Raleway-Regular", 60, False, -0.5],
"H3": [FONT_PATH + "Raleway-SemiBold", 48, False, 0],
"H4": [FONT_PATH + "Raleway-SemiBold", 34, False, 0.25],
"H5": [FONT_PATH + "Raleway-SemiBold", 24, False, 0],
"H6": [FONT_PATH + "Raleway-SemiBold", 20, False, 0.15],
"Subtitle1": [
FONT_PATH + "Raleway-Medium",
16,
False,
0.15,
],
"Subtitle2": [
FONT_PATH + "Raleway-SemiBold",
14,
False,
0.1,
],
"Body1": [FONT_PATH + "Raleway-SemiBold", 16, False, 0.5],
"Body2": [FONT_PATH + "Raleway-Regular", 14, False, 0.25],
"Button": [FONT_PATH + "Raleway-SemiBold", 14, True, 1.25],
"Caption": [
FONT_PATH + "Raleway-Medium",
12,
False,
0.4,
],
"Overline": [
FONT_PATH + "Raleway-SemiBold",
12,
True,
1.5,
],
}
)
return Builder.load_string(KV)
MDCrane().run()
| 29.408163 | 77 | 0.519084 | 1,849 | 0.641568 | 0 | 0 | 0 | 0 | 0 | 0 | 1,058 | 0.367106 |
7349101381b3dbb9e23adbac5458b1fa8f012f0b | 8,368 | py | Python | tasks/lgutil/graph_net.py | HimmelStein/lg-flask | 562adfe16c3dd718faf694b8233586422d035e17 | [
"MIT"
]
| null | null | null | tasks/lgutil/graph_net.py | HimmelStein/lg-flask | 562adfe16c3dd718faf694b8233586422d035e17 | [
"MIT"
]
| null | null | null | tasks/lgutil/graph_net.py | HimmelStein/lg-flask | 562adfe16c3dd718faf694b8233586422d035e17 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from nltk.parse import DependencyGraph
from collections import defaultdict
import random
import sys
import copy
from json import dumps
from pprint import pprint
try:
from .lg_graph import LgGraph
except ImportError:  # e.g. when the module is loaded outside its package
    sys.path.append("/Users/tdong/git/lg-flask/tasks/lgutil")
    from lg_graph import LgGraph  # absolute import once the directory is on sys.path
class GraphNet(DependencyGraph):
"""
{'address': 1,
'ctag': 'PRO',
'deps': defaultdict(list, {'remove-link-verb':[..]}),
'feats': '3|Sg|Masc|Nom',
'head': 2,
'lemma': 'er', --> 'lemma' : <sentence of the ldg>
'tag': 'PPER',
'word': 'Er' --> 'ldg': <graph>
}
tag, ctag, and feats are not used!
"""
def __init__(self, ldg=None):
DependencyGraph.__init__(self)
self.nodes = defaultdict(lambda: {'address': None,
'ldg': 0,
'gid': 1, #has the same value of the gid of nodes in ldg.
'lemma': None,
'head': None,
'deps': defaultdict(int),
'remaining_ops': defaultdict(list), #list(LgGraph.operator_dic.keys()),
'ctag': None,
'tag': None,
'feats': None,
})
self.git_list = [1]
self.nodes[0].update(
{'address': 0,
'head': -1,
'ldg': 'TOP',
'gid': 1, #has the same value of the gid of nodes in ldg.
'remaining_ops': defaultdict(list),
}
)
if isinstance(ldg, LgGraph):
self.nodes[0]['ldg'] = ldg
if isinstance(ldg, GraphNet):
self.nodes = ldg
self.git_list = ldg.get_git_list()
def get_next_gid(self):
gid = random.randint(2,99)
while gid in self.git_list:
gid = random.randint(2, 99)
self.git_list.append(gid)
return gid
def get_git_list(self):
return list(self.nodes.keys())
def set_gid(self, gid):
for node in self.nodes.values():
node['gid'] = gid
if isinstance(node['ldg'], LgGraph):
node['ldg'].set_gid(gid)
def set_head(self, gid, address=1):
self.nodes[address]['head'] = gid
def set_key_address_same_as_gid(self, address, newGid):
if address in self.nodes.keys():
self.nodes[newGid] = copy.deepcopy(self.nodes[address])
self.nodes[newGid]['address'] = newGid
del self.nodes[address]
def to_json(self):
dic = {}
for nodeId in self.nodes.keys():
dic[nodeId] = self.nodes[nodeId]
if isinstance(dic[nodeId]['ldg'], LgGraph):
dic[nodeId]['ldg'] = dic[nodeId]['ldg'].ldg2json()
pprint(dic)
return dic
def _remove_node(self, address):
del self.nodes[address]
def gen_ldg_in_net(self):
for node in self.nodes.values():
if isinstance(node['ldg'], LgGraph):
yield node
def fork_ldg(self, ldg=None):
"""
if ldg == None
if ldg != None
:param ldg:
:return:
"""
if isinstance(ldg, LgGraph):
gid = ldg.get_gid()
newGid = self.get_next_gid()
cpLdg = copy.deepcopy(ldg)
cpLdg.set_gid(newGid)
self.nodes[newGid]['ldg'] = cpLdg
self.nodes[newGid]['address']= newGid
self.nodes[newGid]['head'] = gid
self.nodes[newGid]['gid'] = newGid # has the same value of the gid of nodes in ldg.
self.nodes[newGid]['remaining_ops'] = list(LgGraph.operator_dic.keys())
self.nodes[gid]['deps'].update({'fork'+str(newGid): newGid})
else:
newGid = self.get_next_gid()
self.nodes[newGid].update(
{'address': newGid,
'head': 0,
'ldg': None,
'gid': newGid, # has the same value of the gid of nodes in ldg.
'remaining_ops': []
}
)
self.nodes[0]['deps'].update({'fork'+str(newGid): newGid})
return newGid
def change_to_ER_graph(self):
"""
change the ldg into an ER graph
:return:
"""
for node in self.nodes.values():
lgGraph = node['ldg']
if lgGraph:
erGraph = lgGraph.get_ER_graph()
node['ldg'] = erGraph
def gen_ER_graph(self, ldg):
fork_gid = self.fork_ldg(ldg = ldg)
for graphNode in list(self.gen_ldg_in_net()):
print('in gen_ER_graph')
lgGraph = graphNode['ldg']
erGraph = lgGraph.get_ER_graph()
print('** ergraph')
newGraphNet = GraphNet(ldg = erGraph)
return newGraphNet
#newGraphNet.to_json()
#newGraphNet.remove_by_address(0)
#newGid = self.get_next_gid()
#newGraphNet.set_gid(newGid)
#gid = int(lgGraph.get_gid())
#newGraphNet.set_key_address_same_as_gid(1, newGid)
#newGraphNet.set_head(gid, address=newGid)
#self.nodes.update(newGraphNet.nodes)
#applied = True
#return applied
def apply_graph_operation(self, operator):
"""
apply operator to all nodes with non-null 'ldg' key of self, except the TOP node
for node in self.nodes.values():
if node.applicatable(operator){
newNode = node.apply(operator)
newGid = self.get_next_gid()
newNode.set_gid(newGid)
gid = node.get_gid()
newNodeInNet = GraphNet(ldg = new_node)
newNodeInNet['head'] = gid
self.nodes[gid]['deps'].append(newGid)
self.nodes[newGid] = newNodeInNet
}
:param operator:
:return:
"""
def remove_operator_from_node(node, operator):
if operator in node['remaining_ops']:
index = node['remaining_ops'].index(operator)
del node['remaining_ops'][index]
return node
applied = False
for graphNode in list(self.gen_ldg_in_net()):
lgGraph = graphNode['ldg']
if operator in graphNode['remaining_ops'] and lgGraph.is_applicable(operator):
graphNode = remove_operator_from_node(graphNode, operator)
newGraph = lgGraph.apply_operator(operator)
newGraphNet = GraphNet(ldg = newGraph)
newGraphNet.remove_by_address(0)
newGid = self.get_next_gid()
newGraphNet.set_gid(newGid)
gid = int(lgGraph.get_gid())
newGraphNet.set_key_address_same_as_gid(1, newGid)
newGraphNet.set_head(gid, address=newGid)
self.nodes[gid]['deps'][operator].append(newGid)
self.nodes.update(newGraphNet.nodes)
applied = True
else:
graphNode = remove_operator_from_node(graphNode, operator)
return applied
def apply_all_graph_operators(self):
"""
this function shall generate all possible graphs
while True:
applied = False
for operator in LgGraph.operator_dic.keys():
applied = applied or self.apply_graph_operation(operator)
if not applied:
break
"""
self.gen_ER_graph()
while True:
applied = False
for operator in LgGraph.operator_dic.keys():
applied = applied or self.apply_graph_operation(operator)
if not applied:
break
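# Rough usage sketch of the fork/apply pattern above (illustrative only; the
# operator name is taken from the __main__ block below):
#
#   net = GraphNet(ldg=some_lg_graph)              # TOP node holds the root graph
#   net.fork_ldg(ldg=some_lg_graph)                # deep copy under a fresh gid
#   net.apply_graph_operation('remove-link-verb')  # derived graphs become new nodes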
if __name__ == '__main__':
LgGraph0 = LgGraph()
LgGraph0.set_sample_snt_ldg_from_db(lan='de', table='pons', num=0)
GraphNet0 = GraphNet(ldg = LgGraph0)
GraphNet0.apply_graph_operation('remove-link-verb')
pprint(GraphNet0.to_json())
| 34.866667 | 113 | 0.518164 | 7,779 | 0.929613 | 142 | 0.016969 | 0 | 0 | 0 | 0 | 2,453 | 0.293141 |
7349161371152ef9656dab45ddf6d709b3bf142a | 5,517 | py | Python | utils/transformations/char_level/char_dces_substitute.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
]
| null | null | null | utils/transformations/char_level/char_dces_substitute.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
]
| null | null | null | utils/transformations/char_level/char_dces_substitute.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
]
| null | null | null | # !/usr/bin/env python
# coding=UTF-8
"""
@Author: WEN Hao
@LastEditors: WEN Hao
@Description:
@Date: 2021-09-24
@LastEditTime: 2022-04-17
Adapted from OpenAttack's DCESSubstitute.
"""
import random
from typing import NoReturn, List, Any, Optional
import numpy as np
from utils.transformations.base import CharSubstitute
from utils.assets import fetch
from utils.misc import DEFAULTS
__all__ = [
"CharacterDCESSubstitute",
]
class CharacterDCESSubstitute(CharSubstitute):
""" """
__name__ = "CharacterDCESSubstitute"
def __init__(
self, threshold: float, random_one: bool = False, **kwargs: Any
) -> NoReturn:
""" """
super().__init__(**kwargs)
self.threshold = threshold
dces_dict = fetch("dces")
self.descs = dces_dict["descs"]
self.neigh = dces_dict["neigh"]
self.random_one = random_one
def _get_candidates(
self,
word: str,
pos_tag: Optional[str] = None,
num: Optional[int] = None,
) -> List[str]:
""" """
candidate_words = []
if self.random_one:
i = DEFAULTS.RNG.integers(0, len(word))
repl_letters = self._apply_dces(word[i], self.threshold)
if len(repl_letters) > 0:
repl_letter = random.choice(repl_letters)
candidate_word = word[:i] + repl_letter + word[i + 1 :]
candidate_words.append(candidate_word)
else:
for i in range(len(word)):
for repl_letter in self._apply_dces(word[i], self.threshold):
candidate_word = word[:i] + repl_letter + word[i + 1 :]
candidate_words.append(candidate_word)
if num:
candidate_words = candidate_words[:num]
return candidate_words
def _apply_dces(self, char: str, threshold: float) -> List[str]:
""" """
c = get_hex_string(char)
if c in self.descs:
description = self.descs[c]["description"]
else:
return []
tokens = description.split(" ")
case = "unknown"
identifiers = []
for token in tokens:
if len(token) == 1:
identifiers.append(token)
elif token == "SMALL":
case = "SMALL"
elif token == "CAPITAL":
case = "CAPITAL"
matches = []
match_ids = []
for i in identifiers:
for idx, val in self.descs.items():
desc_toks = val["description"].split(" ")
if (
i in desc_toks
and not np.any(np.in1d(desc_toks, _disallowed))
and not np.any(np.in1d(idx, _disallowed_codes))
and not int(idx, 16) > 30000
):
desc_toks = np.array(desc_toks)
case_descriptor = desc_toks[
(desc_toks == "SMALL") | (desc_toks == "CAPITAL")
]
if len(case_descriptor) > 1:
case_descriptor = case_descriptor[0]
elif len(case_descriptor) == 0:
case = "unknown"
if case == "unknown" or case == case_descriptor:
match_ids.append(idx)
matches.append(val["vec"])
if len(matches) == 0:
return []
match_vecs = np.stack(matches)
Y = match_vecs
self.neigh.fit(Y)
X = self.descs[c]["vec"].reshape(1, -1)
if Y.shape[0] > threshold:
dists, idxs = self.neigh.kneighbors(X, threshold, return_distance=True)
else:
dists, idxs = self.neigh.kneighbors(X, Y.shape[0], return_distance=True)
probs = dists.flatten()
charcodes = [match_ids[idx] for idx in idxs.flatten()]
chars = []
for idx, charcode in enumerate(charcodes):
if probs[idx] < threshold:
chars.append(chr(int(charcode, 16)))
return chars
@property
def deterministic(self) -> bool:
return not self.random_one
def extra_repr_keys(self) -> List[str]:
return super().extra_repr_keys() + [
"threshold",
"random_one",
]
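# Usage sketch (an assumption, not part of the original module): the DCES tables
# are fetched once in __init__, after which
#
#   sub = CharacterDCESSubstitute(threshold=0.5, random_one=False)
#   sub._get_candidates("attack")
#
# yields variants of "attack" in which single characters are replaced by visually
# similar Unicode neighbours whose embedding distance falls below the threshold.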
_disallowed = [
"TAG",
"MALAYALAM",
"BAMUM",
"HIRAGANA",
"RUNIC",
"TAI",
"SUNDANESE",
"BATAK",
"LEPCHA",
"CHAM",
"TELUGU",
"DEVANGARAI",
"BUGINESE",
"MYANMAR",
"LINEAR",
"SYLOTI",
"PHAGS-PA",
"CHEROKEE",
"CANADIAN",
"YI",
"LYCIAN",
"HANGUL",
"KATAKANA",
"JAVANESE",
"ARABIC",
"KANNADA",
"BUHID",
"TAGBANWA",
"DESERET",
"REJANG",
"BOPOMOFO",
"PERMIC",
"OSAGE",
"TAGALOG",
"MEETEI",
"CARIAN",
"UGARITIC",
"ORIYA",
"ELBASAN",
"CYPRIOT",
"HANUNOO",
"GUJARATI",
"LYDIAN",
"MONGOLIAN",
"AVESTAN",
"MEROITIC",
"KHAROSHTHI",
"HUNGARIAN",
"KHUDAWADI",
"ETHIOPIC",
"PERSIAN",
"OSMANYA",
"ELBASAN",
"TIBETAN",
"BENGALI",
"TURKIC",
"THROWING",
"HANIFI",
"BRAHMI",
"KAITHI",
"LIMBU",
"LAO",
"CHAKMA",
"DEVANAGARI",
"ITALIC",
"CJK",
"MEDEFAIDRIN",
"DIAMOND",
"SAURASHTRA",
"ADLAM",
"DUPLOYAN",
]
_disallowed_codes = [
"1F1A4",
"A7AF",
]
def get_hex_string(ch: str) -> str:
return "{:04x}".format(ord(ch)).upper()
| 23.576923 | 84 | 0.518035 | 3,893 | 0.704871 | 0 | 0 | 81 | 0.014666 | 0 | 0 | 1,057 | 0.191381 |
734ac6f57c878180c7a2282d8eb947a2ce6b549f | 2,588 | py | Python | piptools/repositories/base.py | LaudateCorpus1/pip-tools | 53c927262d816c336664afee9b03996bfb8f9c44 | [
"BSD-3-Clause"
]
| 2 | 2021-12-02T11:41:02.000Z | 2021-12-27T12:01:53.000Z | piptools/repositories/base.py | LaudateCorpus1/pip-tools | 53c927262d816c336664afee9b03996bfb8f9c44 | [
"BSD-3-Clause"
]
| 3 | 2020-11-20T18:42:20.000Z | 2021-06-20T09:38:27.000Z | piptools/repositories/base.py | LaudateCorpus1/pip-tools | 53c927262d816c336664afee9b03996bfb8f9c44 | [
"BSD-3-Clause"
]
| 2 | 2021-07-13T08:53:43.000Z | 2022-02-02T14:10:58.000Z | import optparse
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from typing import Iterator, Optional, Set
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.index import PyPI
from pip._internal.network.session import PipSession
from pip._internal.req import InstallRequirement
class BaseRepository(metaclass=ABCMeta):
DEFAULT_INDEX_URL = PyPI.simple_url
def clear_caches(self) -> None:
"""Should clear any caches used by the implementation."""
@abstractmethod
def find_best_match(
self, ireq: InstallRequirement, prereleases: Optional[bool]
) -> InstallRequirement:
"""
Returns a pinned InstallRequirement object that indicates the best match
for the given InstallRequirement according to the external repository.
"""
@abstractmethod
def get_dependencies(self, ireq: InstallRequirement) -> Set[InstallRequirement]:
"""
Given a pinned, URL, or editable InstallRequirement, returns a set of
dependencies (also InstallRequirements, but not necessarily pinned).
They indicate the secondary dependencies for the given requirement.
"""
@abstractmethod
def get_hashes(self, ireq: InstallRequirement) -> Set[str]:
"""
Given a pinned InstallRequirement, returns a set of hashes that represent
all of the files for a given requirement. It is not acceptable for an
editable or unpinned requirement to be passed to this function.
"""
@abstractmethod
@contextmanager
def allow_all_wheels(self) -> Iterator[None]:
"""
Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.
"""
@abstractmethod
def copy_ireq_dependencies(
self, source: InstallRequirement, dest: InstallRequirement
) -> None:
"""
Notifies the repository that `dest` is a copy of `source`, and so it
has the same dependencies. Otherwise, once we prepare an ireq to assign
it its name, we would lose track of those dependencies on combining
that ireq with others.
"""
@property
@abstractmethod
def options(self) -> optparse.Values:
"""Returns parsed pip options"""
@property
@abstractmethod
def session(self) -> PipSession:
"""Returns a session to make requests"""
@property
@abstractmethod
def finder(self) -> PackageFinder:
"""Returns a package finder to interact with simple repository API (PEP 503)"""
| 34.506667 | 88 | 0.693199 | 2,240 | 0.865533 | 0 | 0 | 2,009 | 0.776275 | 0 | 0 | 1,259 | 0.486476 |
734b4343088715a23f5435206ac174b0bc22413c | 11,371 | py | Python | tfx/orchestration/portable/execution_publish_utils.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
]
| null | null | null | tfx/orchestration/portable/execution_publish_utils.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
]
| null | null | null | tfx/orchestration/portable/execution_publish_utils.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from absl import logging
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
original_artifact: types.Artifact,
has_multiple_artifacts: bool) -> None:
"""Check the validity of new artifact against the original artifact."""
if new_artifact.type_id != original_artifact.type_id:
raise RuntimeError('Executor output should not change artifact type.')
if has_multiple_artifacts:
# If there are multiple artifacts in the executor output, their URIs should
# be a direct sub-dir of the system generated URI.
if os.path.dirname(new_artifact.uri) != original_artifact.uri:
raise RuntimeError(
'When there are multiple artifacts to publish, their URIs '
'should be direct sub-directories of the URI of the system generated '
'artifact.')
else:
# If there is only one output artifact, its URI should not be changed
if new_artifact.uri != original_artifact.uri:
# TODO(b/175426744): Data Binder will modify the uri.
logging.warning(
'When there is one artifact to publish, the URI of it should be '
'identical to the URI of system generated artifact.')
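# Illustrative example of the URI convention enforced above (paths are made up):
#   system-generated artifact uri:          /pipelines/x/Trainer/model/5
#   multiple executor outputs must live at: /pipelines/x/Trainer/model/5/<subdir>
#   a single executor output should keep:   /pipelines/x/Trainer/model/5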
def publish_cached_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
) -> None:
"""Marks an existing execution as using cached outputs from a previous execution.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.CACHED
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
input_artifacts=None,
output_artifacts=output_artifacts)
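# Minimal usage sketch (names and the connection setup are assumptions):
#
#   with metadata.Metadata(connection_config) as m:
#     publish_cached_execution(
#         m, contexts=contexts, execution_id=execution.id,
#         output_artifacts=cached_output_artifacts)
#
# which marks the execution CACHED and links the reused artifacts as OUTPUT events
# without re-running the component.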
def _set_execution_result_if_not_empty(
executor_output: Optional[execution_result_pb2.ExecutorOutput],
execution: metadata_store_pb2.Execution) -> bool:
"""Sets execution result as a custom property of the execution."""
if executor_output and (executor_output.execution_result.result_message or
executor_output.execution_result.metadata_details or
executor_output.execution_result.code):
# TODO(b/190001754): Consider either switching to base64 encoding or using
# a proto descriptor pool to circumvent TypeError which may be raised when
# converting embedded `Any` protos.
try:
execution_lib.set_execution_result(executor_output.execution_result,
execution)
except TypeError:
logging.exception(
'Skipped setting execution_result as custom property of the '
'execution due to error')
def publish_succeeded_execution(
metadata_handler: metadata.Metadata,
execution_id: int,
contexts: Sequence[metadata_store_pb2.Context],
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
"""Marks an existing execution as success.
Also publishes the output artifacts produced by the execution. This method
will also merge the executor produced info into system generated output
  artifacts. The `last_known_state` of the execution will be changed to
`COMPLETE` and the output artifacts will be marked as `LIVE`.
Args:
metadata_handler: A handler to access MLMD.
execution_id: The id of the execution to mark successful.
    contexts: MLMD contexts to associate with the execution.
output_artifacts: Output artifacts skeleton of the execution, generated by
the system. Each artifact will be linked with the execution through an
event with type OUTPUT.
executor_output: Executor outputs. `executor_output.output_artifacts` will
be used to update system-generated output artifacts passed in through
      `output_artifacts` arg. There are three constraints to the update: 1. The
keys in `executor_output.output_artifacts` are expected to be a subset
of the system-generated output artifacts dict. 2. An update to a certain
      key should contain all the artifacts under that key. 3. An update to an
artifact should not change the type of the artifact.
Returns:
    The possibly updated output_artifacts; note that only outputs whose keys are
    in executor_output will be updated, while the others are left untouched. In
    other words, the result can be partially updated.
Raises:
    RuntimeError: if the executor output to an output channel is partial.
"""
output_artifacts = copy.deepcopy(output_artifacts) or {}
output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
output_artifacts)
if executor_output:
if not set(executor_output.output_artifacts.keys()).issubset(
output_artifacts.keys()):
raise RuntimeError(
'Executor output %s contains more keys than output skeleton %s.' %
(executor_output, output_artifacts))
for key, artifact_list in output_artifacts.items():
if key not in executor_output.output_artifacts:
continue
updated_artifact_list = executor_output.output_artifacts[key].artifacts
# We assume the original output dict must include at least one output
# artifact and all artifacts in the list share the same type.
original_artifact = artifact_list[0]
# Update the artifact list with what's in the executor output
artifact_list.clear()
# TODO(b/175426744): revisit this:
      # 1) Whether multiple outputs are needed or not after TFX components
      #    are upgraded.
      # 2) If multiple outputs are needed and this is a common practice, should
      #    we use the driver to create the list of output artifacts instead
      #    of letting the executor create them.
for proto_artifact in updated_artifact_list:
_check_validity(proto_artifact, original_artifact,
len(updated_artifact_list) > 1)
python_artifact = types.Artifact(original_artifact.artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
artifact_list.append(python_artifact)
# Marks output artifacts as LIVE.
for artifact_list in output_artifacts.values():
for artifact in artifact_list:
artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(
metadata_handler, execution, contexts, output_artifacts=output_artifacts)
return output_artifacts
def publish_failed_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Marks an existing execution as failed.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
executor_output: The output of executor.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.FAILED
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None
) -> None:
"""Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type INTERNAL_OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts,
output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
"""Registers a new execution in MLMD.
Along with the execution:
- the input artifacts will be linked to the execution.
- the contexts will be linked to both the execution and its input artifacts.
Args:
metadata_handler: A handler to access MLMD.
execution_type: The type of the execution.
    contexts: MLMD contexts to associate with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event.
exec_properties: Execution properties. Will be attached to the execution.
Returns:
An MLMD execution that is registered in MLMD, with id populated.
"""
execution = execution_lib.prepare_execution(
metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
exec_properties)
return execution_lib.put_execution(
metadata_handler, execution, contexts, input_artifacts=input_artifacts)
| 43.734615 | 97 | 0.735731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,443 | 0.478674 |
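The publishing helpers above are normally called in sequence by the orchestrator: register an execution, run the component executor, then publish its result. The sketch below is an assumed illustration only and is not part of the original module; every `my_*` object is a hypothetical placeholder supplied by the surrounding orchestration code.

# Hypothetical usage sketch (not from the original source). All `my_*` objects
# are assumed to be created elsewhere by the orchestration layer.
execution = register_execution(
    metadata_handler=my_metadata_handler,   # metadata.Metadata connection (assumed)
    execution_type=my_execution_type,       # metadata_store_pb2.ExecutionType (assumed)
    contexts=my_contexts,                   # pipeline/node contexts (assumed)
    input_artifacts=my_input_artifacts,     # resolved input artifacts (assumed)
    exec_properties={"train_steps": 1000},  # illustrative property only
)
# ... run the component executor here and collect `my_executor_output` ...
publish_succeeded_execution(
    metadata_handler=my_metadata_handler,
    execution_id=execution.id,
    contexts=my_contexts,
    output_artifacts=my_output_artifacts,   # system-generated output skeleton (assumed)
    executor_output=my_executor_output,     # execution_result_pb2.ExecutorOutput (assumed)
)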
734bd8fdc6b5e208d672c4c4eac90f446f5043c6 | 6,220 | py | Python | src/dctm/datasets.py | spotify-research/dctm | e813aca23c3f54bc55ace5b3342aaec5cc7dad60 | [
"Apache-2.0"
]
| 11 | 2020-08-11T10:18:48.000Z | 2021-12-23T15:34:46.000Z | src/dctm/datasets.py | spotify-research/dctm | e813aca23c3f54bc55ace5b3342aaec5cc7dad60 | [
"Apache-2.0"
]
| null | null | null | src/dctm/datasets.py | spotify-research/dctm | e813aca23c3f54bc55ace5b3342aaec5cc7dad60 | [
"Apache-2.0"
]
| 2 | 2020-09-02T23:02:11.000Z | 2020-11-17T05:16:29.000Z | #
# Copyright 2020 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Datasets utilities.
If you use nltk you may need the following:
nltk.download('words')
nltk.download('punkt')
nltk.download('wordnet')
"""
import os
import nltk
import numpy as np
import pandas as pd
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS, CountVectorizer
from sklearn.utils import Bunch
ENGLISH_WORDS = set(nltk.corpus.words.words())
STEMMER = SnowballStemmer('english')
class LemmaTokenizer:
def __init__(self, stem=False):
self.wnl = WordNetLemmatizer()
if stem:
self.stemmer = SnowballStemmer('english')
else:
self.stemmer = Bunch(stem=lambda x: x)
def __call__(self, doc):
return [
self.wnl.lemmatize(self.stemmer.stem(t))
for t in word_tokenize(doc) if t.lower() in ENGLISH_WORDS
]
def get_neurips(filename: str):
"""Get NeurIPS dataset.
Args:
filename (str): Location of the file for NeurIPS dataset.
"""
df = pd.read_csv(filename, header=0, index_col=0)
year = np.array([x.split('_')[0] for x in df.columns])
# preprocess
df = df.loc[df.index.dropna()]
df = df.loc[~df.index.isin(ENGLISH_STOP_WORDS)]
df.index = [STEMMER.stem(x) for x in df.index.tolist()]
# merge same words together
df = df.groupby(level=0).sum()
vocabulary = df.sum(axis=1)
return df, year, vocabulary
def get_sotu(path: str, stem=False):
df = {}
for filename in sorted(os.listdir(path)):
fn = os.path.join(path, filename)
df[filename] = ' '.join(
[x.decode("utf-8") for x in open(fn, 'rb').readlines()])
df = pd.Series(df)
df.index = df.index.str.split('.txt').map(lambda x: x[0])
df = pd.DataFrame(df, columns=['text'])
df['years'] = df.index.str.split('_').map(lambda x: int(x[1]))
df['author'] = df.index.str.split('_').map(lambda x: x[0])
stopwords_english = LemmaTokenizer(stem=stem)(
' '.join(list(ENGLISH_STOP_WORDS)))
vect = CountVectorizer(
max_df=0.9, min_df=50, stop_words=stopwords_english,
tokenizer=LemmaTokenizer(stem=stem))
corpus = vect.fit_transform(df.text)
vocabulary = np.array(vect.get_feature_names())
keep = np.array(corpus.sum(axis=1) > 0).flatten()
corpus = corpus[keep]
df = df.loc[keep]
return df, corpus, vocabulary
import json
def get_doj(filename: str = 'data/doj.json', stem=True, min_counts=50):
df = []
with open(filename, 'r') as f:
for line in f:
df.append(json.loads(line))
df = pd.DataFrame(df).set_index('id')
df.index = range(df.shape[0])
df['text'] = df.title + ' ' + df.contents
days = pd.to_datetime(
df.date.str.split('T').map(lambda x: x[0]).str.split('-').map(
lambda x: '-'.join(x[:-1])), format='%Y-%m')
df['days'] = days
df['time_delta'] = (df.days - df.days.min()).dt.days
stop_words = LemmaTokenizer(stem=stem)(
' '.join(list(ENGLISH_STOP_WORDS)))
vectorizer = CountVectorizer(
max_df=0.85, min_df=min_counts, stop_words=stop_words,
tokenizer=LemmaTokenizer(stem=stem))
corpus = vectorizer.fit_transform(df.text)
vocabulary = np.array(vectorizer.get_feature_names())
keep = np.array(corpus.sum(axis=1) > 0).flatten()
corpus = corpus[keep]
df = df.loc[keep]
return df, corpus, vocabulary
def train_test_split(X, index_points, train_size=0.75, return_sorted=True):
unique_index_points = np.unique(index_points)
train_idx = np.random.choice(
unique_index_points, int(len(unique_index_points) * train_size),
replace=False)
tr_idx = np.array([x in train_idx for x in index_points.flatten()])
index_tr = index_points[tr_idx]
X_tr = X[tr_idx]
test_idx = np.unique(list(set(unique_index_points) - set(train_idx)))
ts_idx = np.array([x in test_idx for x in index_points.flatten()])
index_ts = index_points[ts_idx]
X_ts = X[ts_idx]
idx = np.argsort(index_tr, axis=0).flatten()
X_tr_sorted = X_tr[idx]
index_tr_sorted = index_tr[idx]
idx = np.argsort(index_ts, axis=0).flatten()
X_ts_sorted = X_ts[idx]
index_ts_sorted = index_ts[idx]
return_list = [X_tr, X_ts, index_tr, index_ts]
if return_sorted:
return_list += [
X_tr_sorted, X_ts_sorted, index_tr_sorted, index_ts_sorted
]
return return_list
def print_to_file_for_gdtm(df, vocabulary, corpus, filename='test', path='.'):
"""Utility function to save datasets for gDTM.
Args:
df ([type]): [description]
vocabulary ([type]): [description]
corpus ([type]): [description]
filename (str, optional): [description]. Defaults to 'test'.
"""
with open(os.path.join(path, '{}_corpus.txt'.format(filename)), 'w') as f:
n_times = df.years.unique().size
f.writelines('{}\n'.format(n_times))
for name, group in df.groupby('years')[0]:
n_docs = group.shape[0]
f.writelines('{}\n{}\n'.format(name.timestamp(), n_docs))
idx = group.index.values
# np.array([df.index.get_loc(x) for x in group.index])
for c in corpus[idx]:
d = c.todok()
f.writelines(
str(len(d)) + ' ' + ' '.join(
'{}:{}'.format(x[1], int(v))
for x, v in d.items()) + '\n')
with open(os.path.join(path, '{}_lexicon.txt'.format(filename)), 'w') as f:
f.writelines('\n'.join(vocabulary))
| 32.910053 | 79 | 0.635852 | 412 | 0.066238 | 0 | 0 | 0 | 0 | 0 | 0 | 1,385 | 0.222669 |
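A small, self-contained illustration of how the train_test_split helper above keeps each index point (for example a time stamp) entirely in either the train or the test split; the random data below is invented for the example and is not part of the original file.

# Illustrative only: random data standing in for a document-term matrix.
import numpy as np

X_demo = np.random.rand(20, 5)                          # 20 "documents", 5 "terms"
index_demo = np.repeat(np.arange(5), 4).reshape(-1, 1)  # 5 unique index points (e.g. years)
X_tr, X_ts, index_tr, index_ts = train_test_split(
    X_demo, index_demo, train_size=0.75, return_sorted=False)
# Every unique index point ends up entirely in either the train or the test split.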
734d1d2e2ae1a6d7737d630a4bc5c6e70adf63d2 | 28,079 | py | Python | billingbudgets/google/cloud/billing_budgets_v1beta1/proto/budget_model_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
]
| 1 | 2019-12-09T11:40:28.000Z | 2019-12-09T11:40:28.000Z | billingbudgets/google/cloud/billing_budgets_v1beta1/proto/budget_model_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
]
| 1 | 2019-03-29T22:03:48.000Z | 2019-04-02T22:24:45.000Z | billingbudgets/google/cloud/billing_budgets_v1beta1/proto/budget_model_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
]
| 1 | 2019-03-29T18:26:16.000Z | 2019-03-29T18:26:16.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/billing/budgets_v1beta1/proto/budget_model.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.type import money_pb2 as google_dot_type_dot_money__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/billing/budgets_v1beta1/proto/budget_model.proto",
package="google.cloud.billing.budgets.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n(com.google.cloud.billing.budgets.v1beta1P\001ZKgoogle.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1;budgets"
),
serialized_pb=_b(
'\n=google/cloud/billing/budgets_v1beta1/proto/budget_model.proto\x12$google.cloud.billing.budgets.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x17google/type/money.proto"\xde\x03\n\x06\x42udget\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12H\n\rbudget_filter\x18\x03 \x01(\x0b\x32,.google.cloud.billing.budgets.v1beta1.FilterB\x03\xe0\x41\x01\x12G\n\x06\x61mount\x18\x04 \x01(\x0b\x32\x32.google.cloud.billing.budgets.v1beta1.BudgetAmountB\x03\xe0\x41\x02\x12Q\n\x0fthreshold_rules\x18\x05 \x03(\x0b\x32\x33.google.cloud.billing.budgets.v1beta1.ThresholdRuleB\x03\xe0\x41\x02\x12S\n\x10\x61ll_updates_rule\x18\x06 \x01(\x0b\x32\x34.google.cloud.billing.budgets.v1beta1.AllUpdatesRuleB\x03\xe0\x41\x01\x12\x11\n\x04\x65tag\x18\x07 \x01(\tB\x03\xe0\x41\x01:]\xea\x41Z\n$billingbudgets.googleapis.com/Budget\x12\x32\x62illingAccounts/{billing_account}/budgets/{budget}"\xa5\x01\n\x0c\x42udgetAmount\x12.\n\x10specified_amount\x18\x01 \x01(\x0b\x32\x12.google.type.MoneyH\x00\x12T\n\x12last_period_amount\x18\x02 \x01(\x0b\x32\x36.google.cloud.billing.budgets.v1beta1.LastPeriodAmountH\x00\x42\x0f\n\rbudget_amount"\x12\n\x10LastPeriodAmount"\xcd\x01\n\rThresholdRule\x12\x1e\n\x11threshold_percent\x18\x01 \x01(\x01\x42\x03\xe0\x41\x02\x12S\n\x0bspend_basis\x18\x02 \x01(\x0e\x32\x39.google.cloud.billing.budgets.v1beta1.ThresholdRule.BasisB\x03\xe0\x41\x01"G\n\x05\x42\x61sis\x12\x15\n\x11\x42\x41SIS_UNSPECIFIED\x10\x00\x12\x11\n\rCURRENT_SPEND\x10\x01\x12\x14\n\x10\x46ORECASTED_SPEND\x10\x02"H\n\x0e\x41llUpdatesRule\x12\x19\n\x0cpubsub_topic\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1b\n\x0eschema_version\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x90\x02\n\x06\x46ilter\x12\x15\n\x08projects\x18\x01 \x03(\tB\x03\xe0\x41\x01\x12\x66\n\x16\x63redit_types_treatment\x18\x04 \x01(\x0e\x32\x41.google.cloud.billing.budgets.v1beta1.Filter.CreditTypesTreatmentB\x03\xe0\x41\x01\x12\x15\n\x08services\x18\x03 \x03(\tB\x03\xe0\x41\x01"p\n\x14\x43reditTypesTreatment\x12&\n"CREDIT_TYPES_TREATMENT_UNSPECIFIED\x10\x00\x12\x17\n\x13INCLUDE_ALL_CREDITS\x10\x01\x12\x17\n\x13\x45XCLUDE_ALL_CREDITS\x10\x02\x42y\n(com.google.cloud.billing.budgets.v1beta1P\x01ZKgoogle.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1;budgetsb\x06proto3'
),
dependencies=[
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_type_dot_money__pb2.DESCRIPTOR,
],
)
_THRESHOLDRULE_BASIS = _descriptor.EnumDescriptor(
name="Basis",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.Basis",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="BASIS_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="CURRENT_SPEND", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="FORECASTED_SPEND",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=992,
serialized_end=1063,
)
_sym_db.RegisterEnumDescriptor(_THRESHOLDRULE_BASIS)
_FILTER_CREDITTYPESTREATMENT = _descriptor.EnumDescriptor(
name="CreditTypesTreatment",
full_name="google.cloud.billing.budgets.v1beta1.Filter.CreditTypesTreatment",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="CREDIT_TYPES_TREATMENT_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="INCLUDE_ALL_CREDITS",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="EXCLUDE_ALL_CREDITS",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1300,
serialized_end=1412,
)
_sym_db.RegisterEnumDescriptor(_FILTER_CREDITTYPESTREATMENT)
_BUDGET = _descriptor.Descriptor(
name="Budget",
full_name="google.cloud.billing.budgets.v1beta1.Budget",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.billing.budgets.v1beta1.Budget.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.cloud.billing.budgets.v1beta1.Budget.display_name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="budget_filter",
full_name="google.cloud.billing.budgets.v1beta1.Budget.budget_filter",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="amount",
full_name="google.cloud.billing.budgets.v1beta1.Budget.amount",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="threshold_rules",
full_name="google.cloud.billing.budgets.v1beta1.Budget.threshold_rules",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="all_updates_rule",
full_name="google.cloud.billing.budgets.v1beta1.Budget.all_updates_rule",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="etag",
full_name="google.cloud.billing.budgets.v1beta1.Budget.etag",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b(
"\352AZ\n$billingbudgets.googleapis.com/Budget\0222billingAccounts/{billing_account}/budgets/{budget}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=189,
serialized_end=667,
)
_BUDGETAMOUNT = _descriptor.Descriptor(
name="BudgetAmount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="specified_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.specified_amount",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_period_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.last_period_amount",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="budget_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.budget_amount",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=670,
serialized_end=835,
)
_LASTPERIODAMOUNT = _descriptor.Descriptor(
name="LastPeriodAmount",
full_name="google.cloud.billing.budgets.v1beta1.LastPeriodAmount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=837,
serialized_end=855,
)
_THRESHOLDRULE = _descriptor.Descriptor(
name="ThresholdRule",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="threshold_percent",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.threshold_percent",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="spend_basis",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.spend_basis",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_THRESHOLDRULE_BASIS],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=858,
serialized_end=1063,
)
_ALLUPDATESRULE = _descriptor.Descriptor(
name="AllUpdatesRule",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="pubsub_topic",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="schema_version",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule.schema_version",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1065,
serialized_end=1137,
)
_FILTER = _descriptor.Descriptor(
name="Filter",
full_name="google.cloud.billing.budgets.v1beta1.Filter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="projects",
full_name="google.cloud.billing.budgets.v1beta1.Filter.projects",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="credit_types_treatment",
full_name="google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment",
index=1,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="services",
full_name="google.cloud.billing.budgets.v1beta1.Filter.services",
index=2,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_FILTER_CREDITTYPESTREATMENT],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1140,
serialized_end=1412,
)
_BUDGET.fields_by_name["budget_filter"].message_type = _FILTER
_BUDGET.fields_by_name["amount"].message_type = _BUDGETAMOUNT
_BUDGET.fields_by_name["threshold_rules"].message_type = _THRESHOLDRULE
_BUDGET.fields_by_name["all_updates_rule"].message_type = _ALLUPDATESRULE
_BUDGETAMOUNT.fields_by_name[
"specified_amount"
].message_type = google_dot_type_dot_money__pb2._MONEY
_BUDGETAMOUNT.fields_by_name["last_period_amount"].message_type = _LASTPERIODAMOUNT
_BUDGETAMOUNT.oneofs_by_name["budget_amount"].fields.append(
_BUDGETAMOUNT.fields_by_name["specified_amount"]
)
_BUDGETAMOUNT.fields_by_name[
"specified_amount"
].containing_oneof = _BUDGETAMOUNT.oneofs_by_name["budget_amount"]
_BUDGETAMOUNT.oneofs_by_name["budget_amount"].fields.append(
_BUDGETAMOUNT.fields_by_name["last_period_amount"]
)
_BUDGETAMOUNT.fields_by_name[
"last_period_amount"
].containing_oneof = _BUDGETAMOUNT.oneofs_by_name["budget_amount"]
_THRESHOLDRULE.fields_by_name["spend_basis"].enum_type = _THRESHOLDRULE_BASIS
_THRESHOLDRULE_BASIS.containing_type = _THRESHOLDRULE
_FILTER.fields_by_name[
"credit_types_treatment"
].enum_type = _FILTER_CREDITTYPESTREATMENT
_FILTER_CREDITTYPESTREATMENT.containing_type = _FILTER
DESCRIPTOR.message_types_by_name["Budget"] = _BUDGET
DESCRIPTOR.message_types_by_name["BudgetAmount"] = _BUDGETAMOUNT
DESCRIPTOR.message_types_by_name["LastPeriodAmount"] = _LASTPERIODAMOUNT
DESCRIPTOR.message_types_by_name["ThresholdRule"] = _THRESHOLDRULE
DESCRIPTOR.message_types_by_name["AllUpdatesRule"] = _ALLUPDATESRULE
DESCRIPTOR.message_types_by_name["Filter"] = _FILTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Budget = _reflection.GeneratedProtocolMessageType(
"Budget",
(_message.Message,),
dict(
DESCRIPTOR=_BUDGET,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""A budget is a plan that describes what you expect to spend on Cloud
projects, plus the rules to execute as spend is tracked against that
plan, (for example, send an alert when 90% of the target spend is met).
Currently all plans are monthly budgets so the usage period(s) tracked
are implied (calendar months of usage back-to-back).
Attributes:
name:
Output only. Resource name of the budget. The resource name
implies the scope of a budget. Values are of the form
``billingAccounts/{billingAccountId}/budgets/{budgetId}``.
display_name:
User data for display name in UI. Validation: <= 60 chars.
budget_filter:
Optional. Filters that define which resources are used to
compute the actual spend against the budget.
amount:
Required. Budgeted amount.
threshold_rules:
Required. Rules that trigger alerts (notifications of
thresholds being crossed) when spend exceeds the specified
percentages of the budget.
all_updates_rule:
Optional. Rules to apply to all updates to the actual spend,
regardless of the thresholds set in ``threshold_rules``.
etag:
Optional. Etag to validate that the object is unchanged for a
read-modify-write operation. An empty etag will cause an
update to overwrite other changes.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.Budget)
),
)
_sym_db.RegisterMessage(Budget)
BudgetAmount = _reflection.GeneratedProtocolMessageType(
"BudgetAmount",
(_message.Message,),
dict(
DESCRIPTOR=_BUDGETAMOUNT,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""The budgeted amount for each usage period.
Attributes:
budget_amount:
Specification for what amount to use as the budget.
specified_amount:
A specified amount to use as the budget. ``currency_code`` is
optional. If specified, it must match the currency of the
billing account. The ``currency_code`` is provided on output.
last_period_amount:
Use the last period's actual spend as the budget for the
present period.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.BudgetAmount)
),
)
_sym_db.RegisterMessage(BudgetAmount)
LastPeriodAmount = _reflection.GeneratedProtocolMessageType(
"LastPeriodAmount",
(_message.Message,),
dict(
DESCRIPTOR=_LASTPERIODAMOUNT,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""Describes a budget amount targeted to last period's spend. At this time,
the amount is automatically 100% of last period's spend; that is, there
are no other options yet. Future configuration will be described here
(for example, configuring a percentage of last period's spend).
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.LastPeriodAmount)
),
)
_sym_db.RegisterMessage(LastPeriodAmount)
ThresholdRule = _reflection.GeneratedProtocolMessageType(
"ThresholdRule",
(_message.Message,),
dict(
DESCRIPTOR=_THRESHOLDRULE,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""ThresholdRule contains a definition of a threshold which triggers an
alert (a notification of a threshold being crossed) to be sent when
spend goes above the specified amount. Alerts are automatically e-mailed
to users with the Billing Account Administrator role or the Billing
Account User role. The thresholds here have no effect on notifications
sent to anything configured under ``Budget.all_updates_rule``.
Attributes:
threshold_percent:
Required. Send an alert when this threshold is exceeded. This
is a 1.0-based percentage, so 0.5 = 50%. Validation: non-
negative number.
spend_basis:
Optional. The type of basis used to determine if spend has
passed the threshold. Behavior defaults to CURRENT\_SPEND if
not set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.ThresholdRule)
),
)
_sym_db.RegisterMessage(ThresholdRule)
AllUpdatesRule = _reflection.GeneratedProtocolMessageType(
"AllUpdatesRule",
(_message.Message,),
dict(
DESCRIPTOR=_ALLUPDATESRULE,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""AllUpdatesRule defines notifications that are sent on every update to
the billing account's spend, regardless of the thresholds defined using
threshold rules.
Attributes:
pubsub_topic:
Required. The name of the Cloud Pub/Sub topic where budget
related messages will be published, in the form
``projects/{project_id}/topics/{topic_id}``. Updates are sent
at regular intervals to the topic. The topic needs to be
created before the budget is created; see
https://cloud.google.com/billing/docs/how-to/budgets#manage-
notifications for more details. Caller is expected to have
``pubsub.topics.setIamPolicy`` permission on the topic when
it's set for a budget, otherwise, the API call will fail with
PERMISSION\_DENIED. See
https://cloud.google.com/pubsub/docs/access-control for more
details on Pub/Sub roles and permissions.
schema_version:
Required. The schema version of the notification. Only "1.0"
is accepted. It represents the JSON schema as defined in
https://cloud.google.com/billing/docs/how-
to/budgets#notification\_format
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.AllUpdatesRule)
),
)
_sym_db.RegisterMessage(AllUpdatesRule)
Filter = _reflection.GeneratedProtocolMessageType(
"Filter",
(_message.Message,),
dict(
DESCRIPTOR=_FILTER,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""A filter for a budget, limiting the scope of the cost to calculate.
Attributes:
projects:
Optional. A set of projects of the form
``projects/{project_id}``, specifying that usage from only
this set of projects should be included in the budget. If
omitted, the report will include all usage for the billing
account, regardless of which project the usage occurred on.
Only zero or one project can be specified currently.
credit_types_treatment:
Optional. If not set, default behavior is
``INCLUDE_ALL_CREDITS``.
services:
Optional. A set of services of the form
``services/{service_id}``, specifying that usage from only
this set of services should be included in the budget. If
omitted, the report will include usage for all the services.
The service names are available through the Catalog API:
https://cloud.google.com/billing/v1/how-tos/catalog-api.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.Filter)
),
)
_sym_db.RegisterMessage(Filter)
DESCRIPTOR._options = None
_BUDGET.fields_by_name["name"]._options = None
_BUDGET.fields_by_name["budget_filter"]._options = None
_BUDGET.fields_by_name["amount"]._options = None
_BUDGET.fields_by_name["threshold_rules"]._options = None
_BUDGET.fields_by_name["all_updates_rule"]._options = None
_BUDGET.fields_by_name["etag"]._options = None
_BUDGET._options = None
_THRESHOLDRULE.fields_by_name["threshold_percent"]._options = None
_THRESHOLDRULE.fields_by_name["spend_basis"]._options = None
_ALLUPDATESRULE.fields_by_name["pubsub_topic"]._options = None
_ALLUPDATESRULE.fields_by_name["schema_version"]._options = None
_FILTER.fields_by_name["projects"]._options = None
_FILTER.fields_by_name["credit_types_treatment"]._options = None
_FILTER.fields_by_name["services"]._options = None
# @@protoc_insertion_point(module_scope)
| 36.849081 | 2,327 | 0.654795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,861 | 0.422415 |
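Because the file above is generated protobuf bindings, a short construction example may help; this is an assumed usage sketch, not part of the generated file, and it presumes the module and google.type.money_pb2 are importable.

# Assumed usage sketch for the generated messages above.
from google.type import money_pb2

budget = Budget(
    display_name="Monthly infra budget",
    amount=BudgetAmount(
        specified_amount=money_pb2.Money(currency_code="USD", units=500)),
    threshold_rules=[
        # threshold_percent is 1.0-based, so 0.9 means alert at 90% of the budget.
        ThresholdRule(threshold_percent=0.9,
                      spend_basis=ThresholdRule.CURRENT_SPEND),
    ],
)
print(budget)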
734e2605b9fe6651d724a46a3b07b21d5d438537 | 4,010 | py | Python | torchreid/optim/sam.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
]
| 1 | 2020-07-07T19:22:17.000Z | 2020-07-07T19:22:17.000Z | torchreid/optim/sam.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
]
| 1 | 2020-06-04T15:22:09.000Z | 2020-06-04T15:22:09.000Z | torchreid/optim/sam.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
]
| 4 | 2020-07-02T09:23:11.000Z | 2020-08-21T08:24:13.000Z | # Copyright 2020 Google Research
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
'''
Imported from: https://github.com/google-research/sam
'''
import torch
class SAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, rho=0.05, adaptive=True, **kwargs):
assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
self.base_optimizer = base_optimizer
defaults = dict(rho=rho, adaptive=adaptive, **self.base_optimizer.defaults)
super().__init__(params, defaults)
self.rho = rho
self.adaptive = adaptive
self.param_groups = self.base_optimizer.param_groups
@torch.no_grad()
def first_step(self, zero_grad=False):
if self._has_overflow(self.param_groups):
if zero_grad: self.zero_grad()
return True
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = self.rho / (grad_norm + 1e-12)
for p in group["params"]:
if p.grad is None: continue
self.state[p]["old_p"] = p.data.clone()
e_w = (torch.pow(p, 2) if self.adaptive else 1.0) * p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
if zero_grad: self.zero_grad()
return False
@torch.no_grad()
def second_step(self, zero_grad=False):
if self._has_overflow(self.param_groups):
if zero_grad: self.zero_grad()
return
for group in self.param_groups:
for p in group["params"]:
if p.grad is None: continue
p.data = self.state[p]["old_p"] # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
if zero_grad: self.zero_grad()
@torch.no_grad()
def step(self):
raise NotImplementedError("SAM doesn't work like the other optimizers,"
" you should first call `first_step` and the `second_step`;")
def _grad_norm(self):
shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism
norm = torch.norm(
torch.stack([
((torch.abs(p) if self.adaptive else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
return norm
@staticmethod
def _has_overflow(params):
''' Check whether the gradient overflow occurred in model parameters '''
def _has_inf_or_nan(x):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
for group in params:
for p in group["params"]:
if p.grad is not None and _has_inf_or_nan(p.grad.data):
return True
return False
| 37.830189 | 131 | 0.56783 | 3,773 | 0.940898 | 0 | 0 | 2,735 | 0.682045 | 0 | 0 | 1,167 | 0.291022 |
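Because step() is intentionally not implemented, SAM has to be driven with the two-phase update described in the error message above. The loop below is a hedged sketch of that pattern; `model`, `criterion`, and `loader` are hypothetical placeholders, and note that this variant of SAM wraps an already constructed base optimizer instance.

# Hypothetical training loop sketch (not from the original file).
base = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)  # model is assumed to exist
optimizer = SAM(model.parameters(), base, rho=0.05, adaptive=True)

for inputs, targets in loader:                       # loader/criterion are assumed
    criterion(model(inputs), targets).backward()
    overflow = optimizer.first_step(zero_grad=True)  # climb to w + e(w); True means grads overflowed
    if not overflow:
        criterion(model(inputs), targets).backward() # second forward/backward at the perturbed weights
        optimizer.second_step(zero_grad=True)        # sharpness-aware update with the base optimizer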
735158944908fbafce88d97668526717a22003eb | 11,958 | py | Python | src/konfiger_stream.py | konfiger/konfiger-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
]
| 4 | 2019-09-25T02:18:43.000Z | 2020-01-21T19:16:05.000Z | src/konfiger_stream.py | keyvaluedb/key-value-db-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
]
| null | null | null | src/konfiger_stream.py | keyvaluedb/key-value-db-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
]
| null | null | null |
"""
The MIT License
Copyright 2020 Adewale Azeez <[email protected]>.
"""
import os.path
from .konfiger_util import type_of, is_string, is_char, is_bool, escape_string, un_escape_string
def file_stream(file_path, delimiter = '=', separator = '\n', err_tolerance = False):
return KonfigerStream(file_path, delimiter, separator, err_tolerance, True)
def string_stream(raw_string, delimiter = '=', separator = '\n', err_tolerance = False):
return KonfigerStream(raw_string, delimiter, separator, err_tolerance, False)
def validate_file_existence(file_path):
if not file_path:
raise TypeError("The file path cannot be None")
if not is_string(file_path):
raise TypeError("Invalid argument expecting str found " + str(type(file_path)))
if not os.path.isfile(file_path):
raise FileNotFoundError("The file does not exists: " + file_path)
class KonfigerStream:
def __init__(self, stream_obj, delimiter, separator, err_tolerance, is_file):
self.stream_obj = stream_obj
self.delimiter = delimiter
self.separator = separator
self.err_tolerance = err_tolerance
self.is_file = is_file
self.trimming_key = True
self.trimming_value = True
self.comment_prefix = "//"
self.continuation_char = "\\"
self.is_first = 0
if is_file:
validate_file_existence(stream_obj)
else:
if not is_string(stream_obj):
raise TypeError("Invalid argument expecting str found " + str(type(stream_obj)))
if not is_bool(err_tolerance):
raise TypeError("Invalid argument for err_tolerance expecting bool found " + str(type(err_tolerance)))
if delimiter and not separator:
raise TypeError("Invalid length of argument, separator or delimiter parameter is missing")
if not is_char(self.delimiter):
raise TypeError("Invalid argument for delimiter expecting char found " + str(type(self.delimiter)))
if not is_char(self.separator):
raise TypeError("Invalid argument for separator expecting char found " + str(type(self.separator)))
self.read_position = 0
self.has_next_ = False
self.done_reading_ = False
def is_trimming_key(self):
return self.trimming_key
def set_trimming_key(self, trimming_key):
if not is_bool(trimming_key):
raise TypeError("Invalid argument, expecting a bool found " + str(type(trimming_key)))
self.trimming_key = trimming_key
def is_trimming_value(self):
return self.trimming_value
def set_trimming_value(self, trimming_value):
if not is_bool(trimming_value):
raise TypeError("Invalid argument, expecting a bool found " + str(type(trimming_value)))
self.trimming_value = trimming_value
def get_comment_prefix(self):
return self.comment_prefix
def set_comment_prefix(self, comment_prefix):
if not is_string(comment_prefix):
raise TypeError("Invalid argument for comment prefix expecting str found " + str(type(comment_prefix)))
self.comment_prefix = comment_prefix
def get_continuation_char(self):
return self.continuation_char
def set_continuation_char(self, continuation_char):
if not is_char(continuation_char):
raise TypeError("Invalid argument for continuation char expecting char found " + str(type(continuation_char)))
self.continuation_char = continuation_char
def is_error_tolerant(self):
return self.err_tolerance
def error_tolerance(self, err_tolerance):
if not is_bool(err_tolerance):
raise TypeError("Invalid argument for err_tolerance expecting char found " + str(type(err_tolerance)))
self.err_tolerance = err_tolerance
def has_next(self):
if not self.done_reading_:
comment_size = len(self.comment_prefix)
sub_count = 0
if self.is_file:
with open(self.stream_obj, "r") as f:
byte = f.read(1)
f.seek(self.read_position)
if not byte:
self.done_reading()
return self.has_next_
while byte:
byte = f.read(1)
while sub_count < comment_size and byte == self.comment_prefix[sub_count]:
sub_count += 1
f.seek(self.read_position+sub_count)
byte = f.read(1)
self.is_first |= 1
if sub_count == comment_size:
self.read_position += 1
while byte and byte != self.separator:
self.read_position += 1
f.seek(self.read_position)
byte = f.read(1)
return self.has_next()
if byte.strip() == '':
self.read_position += 1
f.seek(self.read_position)
continue
self.has_next_ = True
return self.has_next_
self.has_next_ = False
return self.has_next_
else:
while self.read_position < len(self.stream_obj):
while sub_count < comment_size and self.stream_obj[sub_count+self.read_position] == self.comment_prefix[sub_count]:
sub_count += 1
if sub_count == comment_size:
self.read_position += 1
while self.read_position < len(self.stream_obj) and self.stream_obj[self.read_position] != self.separator:
self.read_position += 1
self.read_position += 1
return self.has_next()
if self.stream_obj[self.read_position].strip() == "":
self.read_position += 1
continue
self.has_next_ = True
return self.has_next_
self.has_next_ = False
return self.has_next_
return self.has_next_
def next(self):
if self.done_reading_:
raise BufferError("You cannot read beyound the stream length, always use has_next() to verify the Stream still has an entry")
key = ""
value = ""
parse_key = True
prev_char = None
prev_prev_char = None
i = '\0'
line = 1
column = 0
if self.is_file:
with open(self.stream_obj, "r") as f:
while True:
byte = f.read(1)
f.seek(self.read_position)
if not byte:
if key != "":
if parse_key == True and self.err_tolerance == False:
raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column))
self.done_reading()
break
self.read_position += 1
char_ = f.read(1)
column += 1
if char_ == '\n':
line += 1
column = 0
if not parse_key and prev_char == self.continuation_char and prev_prev_char != '\\':
if value[len(value)-1] == '\r':
value = value[:-2]
else:
value = value[:-1]
while char_.strip() == "":
f.seek(self.read_position)
self.read_position += 1
char_ = f.read(1)
self.read_position -= 1
continue
if char_ == self.separator and prev_char != '^':
if len(key) == 0 and value == "":
continue
if parse_key == True and self.err_tolerance == False:
raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column))
break
if char_ == self.delimiter and parse_key:
if value != "" and self.err_tolerance != False:
raise LookupError("The input is imporperly sepreated near Line " + str(line) + ":" + str(column)+". Check the separator")
parse_key = False
continue
if parse_key == True:
key += char_
else:
value += char_
prev_prev_char = prev_prev_char if char_ == '\r' else prev_char
prev_char = ('\0' if prev_char != '\\' else '\\') if char_ == '\r' else char_
else:
for self.read_position in range(self.read_position, len(self.stream_obj)+1):
if self.read_position == len(self.stream_obj):
if key != "":
if parse_key == True and self.err_tolerance == False:
raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column))
self.done_reading()
break
character = self.stream_obj[self.read_position]
column += 1
if character == '\n':
line += 1
column = 0
if not parse_key and prev_char == self.continuation_char and prev_prev_char != '\\':
if value[len(value)-1] == '\r':
value = value[:-2]
else:
value = value[:-1]
while character.strip() == "":
self.read_position += 1
character = self.stream_obj[self.read_position]
self.read_position -= 1
continue
if character == self.separator and prev_char != '^' and not parse_key:
if key == "" and value =="":
continue
if parse_key == True and self.err_tolerance == False:
raise LookupError("Invalid entry detected near Line " + str(line) + ":" + str(column))
break
if character == self.delimiter and parse_key:
if value != "" and self.err_tolerance == False:
raise LookupError("The input is imporperly sepreated near Line " + str(line) + ":" + str(column)+". Check the separator")
parse_key = False
continue
if parse_key:
key += character
else:
value += character
prev_prev_char = prev_prev_char if character == '\r' else prev_char
prev_char = ('\0' if prev_char != '\\' else '\\') if character == '\r' else character
self.read_position += 1
return (
key.strip() if self.trimming_key else key,
un_escape_string(value, self.separator).strip() if self.trimming_value else un_escape_string(value, self.separator)
)
def done_reading(self):
self.has_next_ = False
self.done_reading_ = True
| 45.816092 | 150 | 0.511373 | 11,052 | 0.924235 | 0 | 0 | 0 | 0 | 0 | 0 | 1,257 | 0.105118 |
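A brief usage sketch for the stream API above, assuming the konfiger package (including konfiger_util) is installed so the relative import resolves; the key/value text is made up for illustration.

# Assumed usage sketch (not part of the original module).
stream = string_stream("project = konfiger\nversion = 1.0", delimiter='=', separator='\n')
while stream.has_next():
    key, value = stream.next()
    print(key, "->", value)   # e.g. "project -> konfiger", then "version -> 1.0"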
735211327e137e292f3ce5c7750409c77a35d0dd | 2,674 | py | Python | matematik.py | Drummersbrother/math_for_school | 5eaa22320298d59b770e68b9640b34a525875132 | [
"MIT"
]
| null | null | null | matematik.py | Drummersbrother/math_for_school | 5eaa22320298d59b770e68b9640b34a525875132 | [
"MIT"
]
| null | null | null | matematik.py | Drummersbrother/math_for_school | 5eaa22320298d59b770e68b9640b34a525875132 | [
"MIT"
]
| null | null | null | import math
import numpy as np
import collections.abc
import scipy.stats as sst
import matplotlib.pyplot as plt
def plot(*args, **kwargs):
plt.plot(*args, **kwargs)
plt.show()
def linregshow(x, y, col: str="r"):
linregresult = sst.linregress(list(zip(x, y)))
plot(x, y, col, x, [(val * linregresult.slope) + linregresult.intercept for val in x])
return linregresult
def list_or_starargs(func):
"""This is a decorator to specify that a function either takes iterable input in the form of an iterable or a list of passed arguments.
If other arguments are needed, the function will need to use kwargs.
This passes the list as the first argument."""
def decorated(*args, **kwargs):
        if isinstance(args[0], collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
data = args[0]
# We make generators into lists
data = [val for val in data]
else:
data = args
return func(data, **kwargs)
return decorated
@list_or_starargs
def spridning(data):
"""Returns the size of the range of values in the data."""
return max(data) - min(data)
@list_or_starargs
def medel(data):
"""Returns the arithmetic mean."""
return sum(data) / len(data)
@list_or_starargs
def median(data):
"""Returns the median."""
# We sort the data
data = sorted(data)
length = len(data)
if length % 2 == 0:
return medel(data[length // 2], data[(length // 2) - 1])
else:
return data[int(length // 2)]
@list_or_starargs
def kvartiler(data):
"""Returns the three quartiles of the data in order: lower, median, higher."""
# We sort the data
data = sorted(data)
# We divide the data into two lists
length = len(data)
if length % 2 == 1:
low_list = data[:(length // 2)]
high_list = data[((length // 2) + 1):]
else:
low_list = data[:int(length / 2)]
high_list = data[int(length / 2):]
# We return the three quartiles
return median(low_list), median(data), median(high_list)
def standardav(data, stick=False):
"""Returns the standard deviation of the input data, which has to be an iterable. stick specifies if it should be treated like
non-total set of values (divide by n-1 instead of n)."""
div_by = len(data) if (not stick) else (len(data) - 1)
medelv = medel(data)
return math.sqrt(sum([(val-medelv)**2 for val in data]) / div_by)
def normal_d(x, u, o):
"""Returns the value of a normal/standard distribution at the value x. u is Mu, and o is the standard deviation."""
return (1 / (o * math.sqrt(2*math.pi))) * (math.e ** (-(((x-u)**2) / (2 * (o**2)))))
| 33.848101 | 139 | 0.628646 | 0 | 0 | 0 | 0 | 1,051 | 0.393044 | 0 | 0 | 893 | 0.333957 |
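The helpers above accept either a single iterable or plain star-arguments thanks to the list_or_starargs decorator; a small worked example follows (the data values are chosen here purely for illustration).

# Illustration of the decorator-based calling conventions above.
data = [4, 8, 15, 16, 23, 42]
print(medel(data))                       # 18.0  (arithmetic mean)
print(median(data))                      # 15.5  (even-length list -> mean of the middle pair)
print(kvartiler(4, 8, 15, 16, 23, 42))   # (8, 15.5, 23), passed as star-arguments
print(standardav(data, stick=True))      # sample standard deviation, divides by n-1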
7352cdca72cd11a42b689b908ad454fb587ad295 | 5,362 | py | Python | docly/ioutils/__init__.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
]
| 29 | 2020-12-31T08:27:32.000Z | 2022-02-15T08:48:51.000Z | docly/ioutils/__init__.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
]
| 4 | 2020-12-30T18:18:54.000Z | 2021-08-03T14:42:35.000Z | docly/ioutils/__init__.py | autosoft-dev/docly | 0bd6216b8a9735e9fa76bffd4ffea6cec6cc4a01 | [
"MIT"
]
| 2 | 2022-01-04T17:58:22.000Z | 2022-02-05T13:04:14.000Z | import os
from pathlib import Path
import requests
import shutil
import sys
from distutils.version import LooseVersion
import time
from tqdm import tqdm
from docly.parser import parser as py_parser
from docly.tokenizers import tokenize_code_string
from docly import __version__
# from c2nl.objects import Code
UPDATE_CHECK_URL = "http://3.80.2.138:8584/vercheck/check-version/"
# UPDATE_CHECK_URL = "http://127.0.0.1:5000/vercheck/check-version/"
interaction_cache = lambda : Path(Path.home() / ".docly" / "interaction_cache")
CACHE_DIR = (Path().home() / ".docly" / "file_cache")
cache_exists = lambda : CACHE_DIR.exists()
make_cache_dir = lambda : os.mkdir(str(CACHE_DIR))
def _compare_installed_version_with_latest(v1, v2):
try:
current_version = LooseVersion(v1)
latest_version = LooseVersion(v2)
assert current_version == latest_version
return True
except AssertionError:
return False
def look_for_update():
with requests.sessions.Session() as s:
try:
r = s.get(UPDATE_CHECK_URL, timeout=2)
r.raise_for_status()
if not _compare_installed_version_with_latest(__version__, r.text):
i_c = interaction_cache()
return True
return False
except Exception:
i_c = interaction_cache()
if not i_c.exists():
os.mkdir(i_c)
if not (i_c / "icache.txt").exists():
with open((i_c / "icache.txt"), "w") as f:
f.write(str(int(time.time())) + "\n")
else:
with open((i_c / "icache.txt"), "a") as f:
f.write(str(int(time.time())) + "\n")
return False
def is_dir(base_path):
if isinstance(base_path, Path):
return base_path.is_dir()
elif isinstance(base_path, str):
return Path(base_path).is_dir()
else:
return False
def is_python_file(file_path):
if isinstance(file_path, Path):
return file_path.suffix == ".py"
elif isinstance(file_path, str):
return Path(file_path).suffix == ".py"
else:
return False
def is_ipynb_notebook(file_path):
if isinstance(file_path, Path):
return file_path.suffix == ".ipynb"
elif isinstance(file_path, str):
return Path(file_path).suffix == ".ipynb"
else:
return False
def download_from_url(url, dst):
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(requests.head(url).headers["Content-Length"])
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=dst.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(dst, 'ab')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
def check_out_path(target_path: Path):
""""
This function recursively yields all contents of a pathlib.Path object
"""
yield target_path
for file in target_path.iterdir():
if file.is_dir():
yield from check_out_path(file)
else:
yield file.absolute()
def process_file(file_path: Path, ts_lib_path: str, use_old=False):
result, parser_obj = py_parser.parse(file_path, ts_lib_path)
func_and_params = parser_obj.get_all_function_names_with_params()
if result:
for func_name, data in py_parser.get_func_body_and_docstr(parser_obj):
# print(py_toeknizer.tokenize_code_string(func_body))
# code.tokens = tokenizer.tokenize(func_body).data
# code.text = func_body
(func_body, docstr), start, end = data
ret_start = (start[0]+1, start[1])
params = func_and_params[func_name]
code_str = [tokenize_code_string(func_body)] if use_old else func_body
yield code_str, params, ret_start, func_name, docstr.strip()
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes", "no", or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '{}}'".format(default))
while True:
print(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | 31.356725 | 82 | 0.610966 | 0 | 0 | 1,074 | 0.200298 | 0 | 0 | 0 | 0 | 1,105 | 0.20608 |
735333a22976a616b2a1727e7723502f1d1387bb | 21 | bzl | Python | java/version.bzl | symonk/selenium | 38e89630f1c4b1a0d3ac6e17765a6ccb58160f83 | [
"Apache-2.0"
]
| null | null | null | java/version.bzl | symonk/selenium | 38e89630f1c4b1a0d3ac6e17765a6ccb58160f83 | [
"Apache-2.0"
]
| null | null | null | java/version.bzl | symonk/selenium | 38e89630f1c4b1a0d3ac6e17765a6ccb58160f83 | [
"Apache-2.0"
]
| null | null | null | SE_VERSION = "4.2.1"
| 10.5 | 20 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.333333 |
735414eb5a0cf25ba65326dd7cc3a0b2acaea272 | 2,978 | py | Python | scripts/preprocess_for_prediction.py | jmueller95/deepgrind | 1fdca224a5256b820fa817a529e79b70c8808d65 | [
"Apache-2.0"
]
| null | null | null | scripts/preprocess_for_prediction.py | jmueller95/deepgrind | 1fdca224a5256b820fa817a529e79b70c8808d65 | [
"Apache-2.0"
]
| null | null | null | scripts/preprocess_for_prediction.py | jmueller95/deepgrind | 1fdca224a5256b820fa817a529e79b70c8808d65 | [
"Apache-2.0"
]
| null | null | null | import pandas as pd
import utils
def check_msms_model_name(converter):
def wrapper(*args, **kwargs):
if kwargs['style'] not in ["pdeep", "prosit"]:
raise Exception("MSMS model must be 'pdeep' or 'prosit'")
        return converter(*args, **kwargs)
return wrapper
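# Note: ``wrapper`` looks up ``kwargs['style']``, so the decorated converters must be called
# with ``style`` as a keyword argument, e.g. (sketch with hypothetical file names):
# ``_convert_for_msms(comet_df, style="prosit", output="msms.csv")``; any other style value
# raises before the converter body runs.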
@check_msms_model_name
def _convert_for_msms(comet_df, style, output):
if style == "prosit":
res = pd.DataFrame(
{"modified_sequence": comet_df.Peptide.apply(
lambda pep: utils.find_modifications(pep[2:-2], style="prosit")).values,
"collision_energy": snakemake.params['collision_energy'],
"precursor_charge": comet_df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1)})
res.dropna(inplace=True)
res.to_csv(output, sep=",", header=True, index=False)
else:
res = pd.DataFrame(
comet_df.Peptide.apply(lambda pep: utils.find_modifications(pep[2:-2], style="pdeep")).to_list(),
columns=["peptide", "modification"])
# The charge is one-hot encoded in the comet df, so we can resolve this into 1,2 or 3 by multiplying 1,2 and 3
# with the entries of Charge1, Charge2 and Charge3
res["charge"] = comet_df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1)
res.dropna(inplace=True)
res.to_csv(output, sep="\t", header=True, index=False)
@check_msms_model_name
def _convert_for_rt(comet_df, style, output):
if style == "prosit":
res = pd.DataFrame(
{"modified_sequence": comet_df.Peptide.apply(lambda pep: utils.find_modifications(pep[2:-2], style="prosit")).values})
res.dropna(inplace=True)
res.to_csv(output, sep=",", header=True, index=False)
else:
raise Exception("Not implemented. Right now, the only accepted RT Model is 'prosit'.")
def main():
# Parse the input file:
comet_df = pd.read_csv(snakemake.input[0], sep="\t", header=0,
usecols=["Peptide", "Charge1", "Charge2", "Charge3"],
index_col=False)
# Determine if MSMS and RT prediction will be performed jointly or separately
if "msms_model" in dict(snakemake.params) and "rt_model" in dict(snakemake.params):
_convert_for_msms(comet_df, style=snakemake.params['msms_model'].lower(),
output=snakemake.output['msms_prediction_input'])
_convert_for_rt(comet_df, style=snakemake.params['rt_model'].lower(),
output=snakemake.output['rt_prediction_input'])
else:
# If only one model was supplied, the prediction will be joint
# Only convert the input for msms in that case
_convert_for_msms(comet_df, style=snakemake.params['model'].lower(),
output=snakemake.output['prediction_input'])
if __name__ == '__main__':
main()
| 46.53125 | 131 | 0.624244 | 0 | 0 | 0 | 0 | 1,614 | 0.541974 | 0 | 0 | 840 | 0.282069 |
7354ded194b9ee5cde59d94c66a6556bf76f8b32 | 1,497 | py | Python | GettingStarted/gettingstarted.py | rohitp934/roadtoadatascientist | 50724b63c2692659cdd48e9ed20e856c231695fd | [
"MIT"
]
| null | null | null | GettingStarted/gettingstarted.py | rohitp934/roadtoadatascientist | 50724b63c2692659cdd48e9ed20e856c231695fd | [
"MIT"
]
| null | null | null | GettingStarted/gettingstarted.py | rohitp934/roadtoadatascientist | 50724b63c2692659cdd48e9ed20e856c231695fd | [
"MIT"
]
| null | null | null | #importing necessary modules
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Data and labels
Xtrain = [[182, 80, 34], [176, 70, 33], [161, 60, 28], [154, 55, 27], [166, 63, 30], [189, 90, 36], [175, 63, 28], [177, 71, 30], [159, 52, 27], [171, 72, 32], [181, 85, 34]]
Ytrain = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']
Xval = [[163, 62, 28], [182, 80, 35], [150, 50, 24], [160, 57, 27], [175, 62, 30], [183, 67, 32], [177, 64, 29], [164, 62, 29], [157, 53, 23], [170, 73, 32], [169, 59, 29]]
Yval = ['female', 'male', 'female', 'female', 'male', 'male', 'female', 'female',
'female', 'male', 'female']
# initializing the ML models
knn = KNeighborsClassifier()
perceptron = Perceptron()
# Fitting the models
knn.fit(Xtrain, Ytrain)
perceptron.fit(Xtrain, Ytrain)
# Testing using our input data
pred_knn = knn.predict(Xval)
acc_knn = accuracy_score(Yval, pred_knn) * 100
print(f'Accuracy for knn: {acc_knn}')
pred_perceptron = perceptron.predict(Xval)
acc_perceptron = accuracy_score(Yval, pred_perceptron) * 100
print(f'Accuracy for perceptron: {acc_perceptron}')
# The best classifier out of the two models
index = np.argmax([acc_knn, acc_perceptron])
#argmax function assigns the index of the maximum value to the variable
classifiers = {0: 'KNN', 1:'PER'}
print(f'Best gender classifier is {classifiers[index]}')
| 38.384615 | 174 | 0.676687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 526 | 0.351369 |
7356af2b787834d2216080e3079e961a0d62871f | 909 | py | Python | libs/optimizers.py | bxtkezhan/AILabs | 6328aa65a3ce5f450389a5a848b641ba36f0e9c5 | [
"MIT"
]
| null | null | null | libs/optimizers.py | bxtkezhan/AILabs | 6328aa65a3ce5f450389a5a848b641ba36f0e9c5 | [
"MIT"
]
| null | null | null | libs/optimizers.py | bxtkezhan/AILabs | 6328aa65a3ce5f450389a5a848b641ba36f0e9c5 | [
"MIT"
]
| null | null | null | import numpy as np
class SGD:
def __init__(self, lr=0.01, momentum=0.0, decay=0.0, nesterov=False,
maximum=None, minimum=None):
self.lr = lr
self.momentum = momentum
self.decay = decay
self.nesterov = nesterov
self.idx = None
self.maximum = maximum or lr
self.minimum = minimum or 0.0
if self.maximum <= self.minimum:
            raise TypeError('maximum must be greater than minimum')
def __call__(self, sample_size, batch_size, status='begin'):
if status == 'begin':
self.idx = np.arange(sample_size)
elif status == 'time':
self.idx = np.random.permutation(self.idx)
self.lr = self.lr - self.decay
self.lr = min(self.lr, self.maximum)
self.lr = max(self.lr, self.minimum)
elif status == 'epoch':
pass
return self.idx, self.lr
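# Minimal usage sketch (one plausible reading of the status protocol): 'begin' initialises the
# index array, 'time' reshuffles it and decays the learning rate before each epoch, and
# 'epoch' marks the end of an epoch.
def _sgd_schedule_demo(n_samples=100, batch_size=10, epochs=3):
    sgd = SGD(lr=0.1, decay=0.01)
    idx, lr = sgd(n_samples, batch_size, status='begin')
    for _ in range(epochs):
        idx, lr = sgd(n_samples, batch_size, status='time')
        for start in range(0, n_samples, batch_size):
            _ = idx[start:start + batch_size]  # indices for this mini-batch
        sgd(n_samples, batch_size, status='epoch')
    return lr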
| 31.344828 | 72 | 0.563256 | 896 | 0.977099 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.062159 |
735716881d6460c9c4e13489b7256920b070c665 | 122,809 | py | Python | pennylane/transforms/qcut.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
]
| null | null | null | pennylane/transforms/qcut.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
]
| null | null | null | pennylane/transforms/qcut.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2022 Xanadu Quantum Technologies Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions for performing quantum circuit cutting.
"""
import copy
import inspect
import string
import uuid
import warnings
from collections.abc import Sequence as SequenceType
from dataclasses import InitVar, dataclass
from functools import partial
from itertools import compress, product
from pathlib import Path
from typing import Any, Callable, ClassVar, Dict, List, Optional, Sequence, Tuple, Union
from networkx import MultiDiGraph, has_path, weakly_connected_components
import pennylane as qml
from pennylane import apply, expval
from pennylane import numpy as np
from pennylane.grouping import string_to_pauli_word
from pennylane.measurements import Expectation, MeasurementProcess, Sample
from pennylane.operation import Operation, Operator, Tensor
from pennylane.ops.qubit.non_parametric_ops import WireCut
from pennylane.tape import QuantumTape
from pennylane.wires import Wires
from .batch_transform import batch_transform
class MeasureNode(Operation):
"""Placeholder node for measurement operations"""
num_wires = 1
grad_method = None
def __init__(self, *params, wires=None, do_queue=True, id=None):
id = id or str(uuid.uuid4())
super().__init__(*params, wires=wires, do_queue=do_queue, id=id)
class PrepareNode(Operation):
"""Placeholder node for state preparations"""
num_wires = 1
grad_method = None
def __init__(self, *params, wires=None, do_queue=True, id=None):
id = id or str(uuid.uuid4())
super().__init__(*params, wires=wires, do_queue=do_queue, id=id)
def replace_wire_cut_node(node: WireCut, graph: MultiDiGraph):
"""
Replace a :class:`~.WireCut` node in the graph with a :class:`~.MeasureNode`
and :class:`~.PrepareNode`.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
node (WireCut): the :class:`~.WireCut` node to be replaced with a :class:`~.MeasureNode`
and :class:`~.PrepareNode`
graph (nx.MultiDiGraph): the graph containing the node to be replaced
**Example**
Consider the following circuit with a manually-placed wire cut:
.. code-block:: python
wire_cut = qml.WireCut(wires=0)
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut)
qml.RY(0.5, wires=0)
qml.expval(qml.PauliZ(0))
We can find the circuit graph and remove the wire cut node using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_node(wire_cut, graph)
"""
predecessors = graph.pred[node]
successors = graph.succ[node]
predecessor_on_wire = {}
for op, data in predecessors.items():
for d in data.values():
wire = d["wire"]
predecessor_on_wire[wire] = op
successor_on_wire = {}
for op, data in successors.items():
for d in data.values():
wire = d["wire"]
successor_on_wire[wire] = op
order = graph.nodes[node]["order"]
graph.remove_node(node)
for wire in node.wires:
predecessor = predecessor_on_wire.get(wire, None)
successor = successor_on_wire.get(wire, None)
meas = MeasureNode(wires=wire)
prep = PrepareNode(wires=wire)
# We are introducing a degeneracy in the order of the measure and prepare nodes
# here but the order can be inferred as MeasureNode always precedes
# the corresponding PrepareNode
graph.add_node(meas, order=order)
graph.add_node(prep, order=order)
graph.add_edge(meas, prep, wire=wire)
if predecessor is not None:
graph.add_edge(predecessor, meas, wire=wire)
if successor is not None:
graph.add_edge(prep, successor, wire=wire)
def replace_wire_cut_nodes(graph: MultiDiGraph):
"""
Replace each :class:`~.WireCut` node in the graph with a
:class:`~.MeasureNode` and :class:`~.PrepareNode`.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): The graph containing the :class:`~.WireCut` nodes
to be replaced
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the circuit graph and remove all the wire cut nodes using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
"""
for op in list(graph.nodes):
if isinstance(op, WireCut):
replace_wire_cut_node(op, graph)
def _add_operator_node(graph: MultiDiGraph, op: Operator, order: int, wire_latest_node: dict):
"""
Helper function to add operators as nodes during tape to graph conversion.
"""
graph.add_node(op, order=order)
for wire in op.wires:
if wire_latest_node[wire] is not None:
parent_op = wire_latest_node[wire]
graph.add_edge(parent_op, op, wire=wire)
wire_latest_node[wire] = op
def tape_to_graph(tape: QuantumTape) -> MultiDiGraph:
"""
Converts a quantum tape to a directed multigraph.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): tape to be converted into a directed multigraph
Returns:
nx.MultiDiGraph: a directed multigraph that captures the circuit structure
of the input tape
**Example**
Consider the following tape:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.9, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(1))
Its corresponding circuit graph can be found using
>>> qml.transforms.qcut.tape_to_graph(tape)
<networkx.classes.multidigraph.MultiDiGraph at 0x7fe41cbd7210>
"""
graph = MultiDiGraph()
wire_latest_node = {w: None for w in tape.wires}
for order, op in enumerate(tape.operations):
_add_operator_node(graph, op, order, wire_latest_node)
order += 1 # pylint: disable=undefined-loop-variable
for m in tape.measurements:
obs = getattr(m, "obs", None)
if obs is not None and isinstance(obs, Tensor):
if m.return_type is Sample:
raise ValueError(
"Sampling from tensor products of observables "
"is not supported in circuit cutting"
)
for o in obs.obs:
m_ = MeasurementProcess(m.return_type, obs=o)
_add_operator_node(graph, m_, order, wire_latest_node)
elif m.return_type is Sample and obs is None:
for w in m.wires:
s_ = qml.sample(qml.Projector([1], wires=w))
_add_operator_node(graph, s_, order, wire_latest_node)
else:
_add_operator_node(graph, m, order, wire_latest_node)
order += 1
return graph
# pylint: disable=too-many-branches
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a graph into a collection of subgraphs as well as returning
the communication (`quotient <https://en.wikipedia.org/wiki/Quotient_graph>`__)
graph.
The input ``graph`` is fragmented by disconnecting each :class:`~.MeasureNode` and
:class:`~.PrepareNode` pair and finding the resultant disconnected subgraph fragments.
Each node of the communication graph represents a subgraph fragment and the edges
denote the flow of qubits between fragments due to the removed :class:`~.MeasureNode` and
:class:`~.PrepareNode` pairs.
.. note::
This operation is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
Tuple[Tuple[nx.MultiDiGraph], nx.MultiDiGraph]: the subgraphs of the cut graph
and the communication graph.
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
>>> qml.transforms.qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
graph_copy = graph.copy()
cut_edges = []
measure_nodes = [n for n in graph.nodes if isinstance(n, MeasurementProcess)]
for node1, node2, wire_key in graph.edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2, wire_key))
graph_copy.remove_edge(node1, node2, key=wire_key)
subgraph_nodes = weakly_connected_components(graph_copy)
subgraphs = tuple(MultiDiGraph(graph_copy.subgraph(n)) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2, _ in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
if start_fragment != end_fragment:
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
else:
# The MeasureNode and PrepareNode pair live in the same fragment and did not result
# in a disconnection. We can therefore remove these nodes. Note that we do not need
# to worry about adding back an edge between the predecessor to node1 and the successor
# to node2 because our next step is to convert the fragment circuit graphs to tapes,
# a process that does not depend on edge connections in the subgraph.
subgraphs[start_fragment].remove_node(node1)
subgraphs[end_fragment].remove_node(node2)
terminal_indices = [i for i, s in enumerate(subgraphs) for n in measure_nodes if s.has_node(n)]
subgraphs_connected_to_measurements = []
subgraphs_indices_to_remove = []
prepare_nodes_removed = []
for i, s in enumerate(subgraphs):
if any(has_path(communication_graph, i, t) for t in terminal_indices):
subgraphs_connected_to_measurements.append(s)
else:
subgraphs_indices_to_remove.append(i)
prepare_nodes_removed.extend([n for n in s.nodes if isinstance(n, PrepareNode)])
measure_nodes_to_remove = [
m for p in prepare_nodes_removed for m, p_, _ in cut_edges if p is p_
]
communication_graph.remove_nodes_from(subgraphs_indices_to_remove)
for m in measure_nodes_to_remove:
for s in subgraphs_connected_to_measurements:
if s.has_node(m):
s.remove_node(m)
return subgraphs_connected_to_measurements, communication_graph
def _find_new_wire(wires: Wires) -> int:
"""Finds a new wire label that is not in ``wires``."""
ctr = 0
while ctr in wires:
ctr += 1
return ctr
# pylint: disable=protected-access
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding :class:`~.QuantumTape`.
To account for the possibility of needing to perform mid-circuit measurements, if any operations
follow a :class:`MeasureNode` operation on a given wire then these operations are mapped to a
new wire.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
graph (nx.MultiDiGraph): directed multigraph to be converted to a tape
Returns:
QuantumTape: the quantum tape corresponding to the input graph
**Example**
Consider the following circuit:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.RY(0.5, wires=1)
qml.CNOT(wires=[0, 1])
qml.transforms.qcut.MeasureNode(wires=1)
qml.transforms.qcut.PrepareNode(wires=1)
qml.CNOT(wires=[1, 0])
qml.expval(qml.PauliZ(0))
This circuit contains operations that follow a :class:`~.MeasureNode`. These operations will
subsequently act on wire ``2`` instead of wire ``1``:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> tape = qml.transforms.qcut.graph_to_tape(graph)
>>> print(tape.draw())
0: ──RX(0.4)──────╭C───────────────╭X──┤ ⟨Z⟩
1: ──RY(0.5)──────╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────────╰C──┤
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
reverse_wire_map = {v: k for k, v in wire_map.items()}
copy_ops = [copy.copy(op) for _, op in ordered_ops if not isinstance(op, MeasurementProcess)]
copy_meas = [copy.copy(op) for _, op in ordered_ops if isinstance(op, MeasurementProcess)]
observables = []
with QuantumTape() as tape:
for op in copy_ops:
new_wires = Wires([wire_map[w] for w in op.wires])
# TODO: find a better way to update operation wires
op._wires = new_wires
apply(op)
if isinstance(op, MeasureNode):
assert len(op.wires) == 1
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
original_wire = reverse_wire_map[measured_wire]
wire_map[original_wire] = new_wire
reverse_wire_map[new_wire] = original_wire
if copy_meas:
return_types = set(meas.return_type for meas in copy_meas)
if len(return_types) > 1:
raise ValueError(
"Only a single return type can be used for measurement "
"nodes in graph_to_tape"
)
return_type = return_types.pop()
if return_type not in {Sample, Expectation}:
raise ValueError(
"Invalid return type. Only expectation value and sampling measurements "
"are supported in graph_to_tape"
)
for meas in copy_meas:
obs = meas.obs
obs._wires = Wires([wire_map[w] for w in obs.wires])
observables.append(obs)
if return_type is Sample:
apply(meas)
if return_type is Expectation:
if len(observables) > 1:
qml.expval(Tensor(*observables))
else:
qml.expval(obs)
return tape
def _get_measurements(
group: Sequence[Operator], measurements: Sequence[MeasurementProcess]
) -> List[MeasurementProcess]:
"""Pairs each observable in ``group`` with the circuit ``measurements``.
Only a single measurement of an expectation value is currently supported
in ``measurements``.
Args:
group (Sequence[Operator]): a collection of observables
measurements (Sequence[MeasurementProcess]): measurements from the circuit
Returns:
List[MeasurementProcess]: the expectation values of ``g @ obs``, where ``g`` is iterated
over ``group`` and ``obs`` is the observable composing the single measurement
in ``measurements``
"""
if len(group) == 0:
# This ensures the measurements of the original tape are carried over to the
# following tape configurations in the absence of any MeasureNodes in the fragment
return measurements
n_measurements = len(measurements)
if n_measurements > 1:
raise ValueError(
"The circuit cutting workflow only supports circuits with a single output "
"measurement"
)
if n_measurements == 0:
return [expval(g) for g in group]
measurement = measurements[0]
if measurement.return_type is not Expectation:
raise ValueError(
"The circuit cutting workflow only supports circuits with expectation "
"value measurements"
)
obs = measurement.obs
return [expval(copy.copy(obs) @ g) for g in group]
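# Illustration of the pairing above: for ``group = [qml.PauliX(1)]`` and a circuit whose single
# measurement is ``qml.expval(qml.PauliZ(0))``, the returned list has one entry equivalent to
# ``qml.expval(qml.PauliZ(0) @ qml.PauliX(1))``; an empty group simply passes the original
# measurements through unchanged.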
def _prep_zero_state(wire):
qml.Identity(wire)
def _prep_one_state(wire):
qml.PauliX(wire)
def _prep_plus_state(wire):
qml.Hadamard(wire)
def _prep_minus_state(wire):
qml.PauliX(wire)
qml.Hadamard(wire)
def _prep_iplus_state(wire):
qml.Hadamard(wire)
qml.S(wires=wire)
def _prep_iminus_state(wire):
qml.PauliX(wire)
qml.Hadamard(wire)
qml.S(wires=wire)
PREPARE_SETTINGS = [_prep_zero_state, _prep_one_state, _prep_plus_state, _prep_iplus_state]
def expand_fragment_tape(
tape: QuantumTape,
) -> Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]:
"""
Expands a fragment tape into a sequence of tapes for each configuration of the contained
:class:`MeasureNode` and :class:`PrepareNode` operations.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): the fragment tape containing :class:`MeasureNode` and
:class:`PrepareNode` operations to be expanded
Returns:
Tuple[List[QuantumTape], List[PrepareNode], List[MeasureNode]]: the
tapes corresponding to each configuration and the order of preparation nodes and
measurement nodes used in the expansion
**Example**
Consider the following circuit, which contains a :class:`~.MeasureNode` and
:class:`~.PrepareNode` operation:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.transforms.qcut.PrepareNode(wires=0)
qml.RX(0.5, wires=0)
qml.transforms.qcut.MeasureNode(wires=0)
We can expand over the measurement and preparation nodes using:
>>> tapes, prep, meas = qml.transforms.qcut.expand_fragment_tape(tape)
>>> for t in tapes:
... print(qml.drawer.tape_text(t, decimals=1))
0: ──I──RX(0.5)─┤ <I> <Z>
0: ──I──RX(0.5)─┤ <X>
0: ──I──RX(0.5)─┤ <Y>
0: ──X──RX(0.5)─┤ <I> <Z>
0: ──X──RX(0.5)─┤ <X>
0: ──X──RX(0.5)─┤ <Y>
0: ──H──RX(0.5)─┤ <I> <Z>
0: ──H──RX(0.5)─┤ <X>
0: ──H──RX(0.5)─┤ <Y>
0: ──H──S──RX(0.5)─┤ <I> <Z>
0: ──H──S──RX(0.5)─┤ <X>
0: ──H──S──RX(0.5)─┤ <Y>
"""
prepare_nodes = [o for o in tape.operations if isinstance(o, PrepareNode)]
measure_nodes = [o for o in tape.operations if isinstance(o, MeasureNode)]
wire_map = {mn.wires[0]: i for i, mn in enumerate(measure_nodes)}
n_meas = len(measure_nodes)
if n_meas >= 1:
measure_combinations = qml.grouping.partition_pauli_group(len(measure_nodes))
else:
measure_combinations = [[""]]
tapes = []
for prepare_settings in product(range(len(PREPARE_SETTINGS)), repeat=len(prepare_nodes)):
for measure_group in measure_combinations:
if n_meas >= 1:
group = [
string_to_pauli_word(paulis, wire_map=wire_map) for paulis in measure_group
]
else:
group = []
prepare_mapping = {
n: PREPARE_SETTINGS[s] for n, s in zip(prepare_nodes, prepare_settings)
}
with QuantumTape() as tape_:
for op in tape.operations:
if isinstance(op, PrepareNode):
w = op.wires[0]
prepare_mapping[op](w)
elif not isinstance(op, MeasureNode):
apply(op)
with qml.tape.stop_recording():
measurements = _get_measurements(group, tape.measurements)
for meas in measurements:
apply(meas)
tapes.append(tape_)
return tapes, prepare_nodes, measure_nodes
MC_STATES = [
_prep_zero_state,
_prep_one_state,
_prep_plus_state,
_prep_minus_state,
_prep_iplus_state,
_prep_iminus_state,
_prep_zero_state,
_prep_one_state,
]
def _identity(wire):
qml.sample(qml.Identity(wires=wire))
def _pauliX(wire):
qml.sample(qml.PauliX(wires=wire))
def _pauliY(wire):
qml.sample(qml.PauliY(wires=wire))
def _pauliZ(wire):
qml.sample(qml.PauliZ(wires=wire))
MC_MEASUREMENTS = [
_identity,
_identity,
_pauliX,
_pauliX,
_pauliY,
_pauliY,
_pauliZ,
_pauliZ,
]
def expand_fragment_tapes_mc(
tapes: Sequence[QuantumTape], communication_graph: MultiDiGraph, shots: int
) -> Tuple[List[QuantumTape], np.ndarray]:
"""
Expands fragment tapes into a sequence of random configurations of the contained pairs of
:class:`MeasureNode` and :class:`PrepareNode` operations.
For each pair, a measurement is sampled from
the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates.
A settings array is also given which tracks the configuration pairs. Since each of the 4
measurements has 2 possible eigenvectors, all configurations can be uniquely identified by
8 values. The number of rows is determined by the number of cuts and the number of columns
is determined by the number of shots.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`~.cut_circuit_mc` transform for more details.
Args:
tapes (Sequence[QuantumTape]): the fragment tapes containing :class:`MeasureNode` and
:class:`PrepareNode` operations to be expanded
communication_graph (nx.MultiDiGraph): the communication (quotient) graph of the fragmented
full graph
shots (int): number of shots
Returns:
Tuple[List[QuantumTape], np.ndarray]: the tapes corresponding to each configuration and the
settings that track each configuration pair
**Example**
Consider the following circuit that contains a sample measurement:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
We can generate the fragment tapes using the following workflow:
>>> g = qml.transforms.qcut.tape_to_graph(tape)
>>> qml.transforms.qcut.replace_wire_cut_nodes(g)
>>> subgraphs, communication_graph = qml.transforms.qcut.fragment_graph(g)
>>> tapes = [qml.transforms.qcut.graph_to_tape(sg) for sg in subgraphs]
We can then expand over the measurement and preparation nodes to generate random
configurations using:
.. code-block:: python
>>> configs, settings = qml.transforms.qcut.expand_fragment_tapes_mc(tapes, communication_graph, 3)
>>> print(settings)
[[1 6 2]]
>>> for i, (c1, c2) in enumerate(zip(configs[0], configs[1])):
... print(f"config {i}:")
... print(c1.draw())
... print("")
... print(c2.draw())
... print("")
...
config 0:
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[I]
1: ──X─╭C─┤ Sample[|1⟩⟨1|]
2: ────╰X─┤ Sample[|1⟩⟨1|]
config 1:
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[Z]
1: ──I─╭C─┤ Sample[|1⟩⟨1|]
2: ────╰X─┤ Sample[|1⟩⟨1|]
config 2:
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[X]
1: ──H─╭C─┤ Sample[|1⟩⟨1|]
2: ────╰X─┤ Sample[|1⟩⟨1|]
"""
pairs = [e[-1] for e in communication_graph.edges.data("pair")]
settings = np.random.choice(range(8), size=(len(pairs), shots), replace=True)
meas_settings = {pair[0].id: setting for pair, setting in zip(pairs, settings)}
prep_settings = {pair[1].id: setting for pair, setting in zip(pairs, settings)}
all_configs = []
for tape in tapes:
frag_config = []
for shot in range(shots):
with qml.tape.QuantumTape() as new_tape:
for op in tape.operations:
w = op.wires[0]
if isinstance(op, PrepareNode):
MC_STATES[prep_settings[op.id][shot]](w)
elif not isinstance(op, MeasureNode):
qml.apply(op)
for meas in tape.measurements:
qml.apply(meas)
for op in tape.operations:
meas_w = op.wires[0]
if isinstance(op, MeasureNode):
MC_MEASUREMENTS[meas_settings[op.id][shot]](meas_w)
frag_config.append(new_tape)
all_configs.append(frag_config)
return all_configs, settings
def _reshape_results(results: Sequence, shots: int) -> List[List]:
"""
Helper function to reshape ``results`` into a two-dimensional nested list whose number of rows
is determined by the number of shots and whose number of columns is determined by the number of
cuts.
"""
results = [qml.math.flatten(r) for r in results]
results = [results[i : i + shots] for i in range(0, len(results), shots)]
results = list(map(list, zip(*results))) # calculate list-based transpose
return results
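# Shape illustration for ``_reshape_results``: with two fragments and ``shots = 3`` the flat
# input [f0_s0, f0_s1, f0_s2, f1_s0, f1_s1, f1_s2] is first chunked per fragment and then
# transposed so that each row collects one shot across all fragments:
# [[f0_s0, f1_s0], [f0_s1, f1_s1], [f0_s2, f1_s2]].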
def qcut_processing_fn_sample(
results: Sequence, communication_graph: MultiDiGraph, shots: int
) -> List:
"""
Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>`
transform. This removes superfluous mid-circuit measurement samples from fragment
circuit outputs.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details.
Args:
results (Sequence): a collection of sample-based execution results generated from the
random expansion of circuit fragments over measurement and preparation node configurations
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
shots (int): the number of shots
Returns:
List[tensor_like]: the sampled output for all terminal measurements over the number of shots given
"""
res0 = results[0]
results = _reshape_results(results, shots)
out_degrees = [d for _, d in communication_graph.out_degree]
samples = []
for result in results:
sample = []
for fragment_result, out_degree in zip(result, out_degrees):
sample.append(fragment_result[: -out_degree or None])
samples.append(np.hstack(sample))
return [qml.math.convert_like(np.array(samples), res0)]
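# Trimming illustration for ``qcut_processing_fn_sample``: a fragment with out-degree 2 in the
# communication graph ends its sample list with two MeasureNode outcomes, which are discarded;
# only the remaining terminal-measurement bits are stacked into the recombined bitstring.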
def qcut_processing_fn_mc(
results: Sequence,
communication_graph: MultiDiGraph,
settings: np.ndarray,
shots: int,
classical_processing_fn: callable,
):
"""
Function to postprocess samples for the :func:`cut_circuit_mc() <pennylane.cut_circuit_mc>`
transform. This takes a user-specified classical function to act on bitstrings and
generates an expectation value.
.. note::
This function is designed for use as part of the sampling-based circuit cutting workflow.
Check out the :func:`qml.cut_circuit_mc() <pennylane.cut_circuit_mc>` transform for more details.
Args:
results (Sequence): a collection of sample-based execution results generated from the
random expansion of circuit fragments over measurement and preparation node configurations
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
settings (np.ndarray): Each element is one of 8 unique values that tracks the specific
measurement and preparation operations over all configurations. The number of rows is determined
by the number of cuts and the number of columns is determined by the number of shots.
shots (int): the number of shots
classical_processing_fn (callable): A classical postprocessing function to be applied to
            the reconstructed bitstrings. The expected input is a bitstring: a flat array of length ``wires``,
and the output should be a single number within the interval :math:`[-1, 1]`.
Returns:
float or tensor_like: the expectation value calculated in accordance to Eq. (35) of
`Peng et al. <https://arxiv.org/abs/1904.00102>`__
"""
res0 = results[0]
results = _reshape_results(results, shots)
out_degrees = [d for _, d in communication_graph.out_degree]
evals = (0.5, 0.5, 0.5, -0.5, 0.5, -0.5, 0.5, -0.5)
expvals = []
for result, setting in zip(results, settings.T):
sample_terminal = []
sample_mid = []
for fragment_result, out_degree in zip(result, out_degrees):
sample_terminal.append(fragment_result[: -out_degree or None])
sample_mid.append(fragment_result[-out_degree or len(fragment_result) :])
sample_terminal = np.hstack(sample_terminal)
sample_mid = np.hstack(sample_mid)
assert set(sample_terminal).issubset({np.array(0), np.array(1)})
assert set(sample_mid).issubset({np.array(-1), np.array(1)})
        # following Eq. (35) of Peng et al.: https://arxiv.org/abs/1904.00102
f = classical_processing_fn(sample_terminal)
if not -1 <= f <= 1:
raise ValueError(
"The classical processing function supplied must "
"give output in the interval [-1, 1]"
)
sigma_s = np.prod(sample_mid)
t_s = f * sigma_s
c_s = np.prod([evals[s] for s in setting])
K = len(sample_mid)
expvals.append(8**K * c_s * t_s)
return qml.math.convert_like(np.mean(expvals), res0)
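# Estimator illustration (cf. Eq. (35) of Peng et al.): with K cuts, each shot contributes
# 8**K * c_s * f(terminal bits) * prod(mid-circuit eigenvalues), where c_s is the product of
# the +/-0.5 coefficients selected by the sampled settings; the returned expectation value is
# the average of these contributions over all shots.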
@batch_transform
def cut_circuit_mc(
tape: QuantumTape,
classical_processing_fn: Optional[callable] = None,
auto_cutter: Union[bool, Callable] = False,
max_depth: int = 1,
shots: Optional[int] = None,
device_wires: Optional[Wires] = None,
**kwargs,
) -> Tuple[Tuple[QuantumTape], Callable]:
"""
Cut up a circuit containing sample measurements into smaller fragments using a
Monte Carlo method.
Following the approach of `Peng et al. <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.150504>`__,
strategic placement of :class:`~.WireCut` operations can allow a quantum circuit to be split
into disconnected circuit fragments. A circuit containing sample measurements can be cut and
processed using Monte Carlo (MC) methods. This transform employs MC methods to allow for sampled measurement
outcomes to be recombined to full bitstrings and, if a classical processing function is supplied,
an expectation value will be evaluated.
Args:
tape (QuantumTape): the tape of the full circuit to be cut
classical_processing_fn (callable): A classical postprocessing function to be applied to
            the reconstructed bitstrings. The expected input is a bitstring: a flat array of length ``wires``,
and the output should be a single number within the interval :math:`[-1, 1]`.
If not supplied, the transform will output samples.
auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default
:func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that
takes an input graph and returns a list of edges to be cut based on a given set of
constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to
be installed using ``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from source for Windows users.
max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts.
Only applicable when transforming a QNode.
shots (int): Number of shots. When transforming a QNode, this argument is
set by the device's ``shots`` value or at QNode call time (if provided).
Required when transforming a tape.
device_wires (Wires): Wires of the device that the cut circuits are to be run on.
When transforming a QNode, this argument is optional and will be set to the
QNode's device wires. Required when transforming a tape.
kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument.
For the default KaHyPar cutter, please refer to the docstring of functions
:func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments.
Returns:
Callable: Function which accepts the same arguments as the QNode.
When called, this function will sample from the partitioned circuit fragments
and combine the results using a Monte Carlo method.
**Example**
The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation and a :func:`~.sample`
measurement. When decorated with ``@qml.cut_circuit_mc``, we can cut the circuit into two
:math:`2`-qubit fragments:
.. code-block:: python
dev = qml.device("default.qubit", wires=2, shots=1000)
@qml.cut_circuit_mc
@qml.qnode(dev)
def circuit(x):
qml.RX(0.89, wires=0)
qml.RY(0.5, wires=1)
qml.RX(1.3, wires=2)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.RX(x, wires=0)
qml.RY(0.7, wires=1)
qml.RX(2.3, wires=2)
return qml.sample(wires=[0, 2])
we can then execute the circuit as usual by calling the QNode:
>>> x = 0.3
>>> circuit(x)
tensor([[1, 1],
[0, 1],
[0, 1],
...,
[0, 1],
[0, 1],
[0, 1]], requires_grad=True)
Furthermore, the number of shots can be temporarily altered when calling
the qnode:
>>> results = circuit(x, shots=123)
>>> results.shape
(123, 2)
Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the
    ``auto_cutter`` option can be enabled to attempt to find such an optimal cut. The
    following example shows this capability on the same circuit as above but with the
:class:`~.WireCut` removed:
.. code-block:: python
@qml.cut_circuit_mc(auto_cutter=True)
@qml.qnode(dev)
def circuit(x):
qml.RX(0.89, wires=0)
qml.RY(0.5, wires=1)
qml.RX(1.3, wires=2)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.RX(x, wires=0)
qml.RY(0.7, wires=1)
qml.RX(2.3, wires=2)
return qml.sample(wires=[0, 2])
>>> results = circuit(x, shots=123)
>>> results.shape
(123, 2)
.. UsageDetails::
Manually placing :class:`~.WireCut` operations and decorating the QNode with the
``cut_circuit_mc()`` batch transform is the suggested entrypoint into sampling-based
circuit cutting using the Monte Carlo method. However,
advanced users also have the option to work directly with a :class:`~.QuantumTape` and
manipulate the tape to perform circuit cutting using the below functionality:
.. autosummary::
:toctree:
~transforms.qcut.tape_to_graph
~transforms.qcut.find_and_place_cuts
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tapes_mc
~transforms.qcut.qcut_processing_fn_sample
~transforms.qcut.qcut_processing_fn_mc
The following shows how these elementary steps are combined as part of the
``cut_circuit_mc()`` transform.
Consider the circuit below:
.. code-block:: python
np.random.seed(42)
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=1)
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
>>> print(tape.draw())
0: ──H─╭C───────────┤ ╭Sample
1: ────╰X──X──//─╭C─┤ ├Sample
2: ──────────────╰X─┤ ╰Sample
To cut the circuit, we first convert it to its graph representation:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use
        :func:`~.find_and_place_cuts` to attempt to automatically find such a cut
given the device constraints. Using the same circuit as above but with the
:class:`~.WireCut` removed, a slightly different cut with identical cost can be discovered
and placed into the circuit with automatic cutting:
.. code-block:: python
with qml.tape.QuantumTape() as uncut_tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=1)
qml.CNOT(wires=[1, 2])
qml.sample(wires=[0, 1, 2])
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
... graph=qml.transforms.qcut.tape_to_graph(uncut_tape),
... cut_strategy=qml.transforms.qcut.CutStrategy(max_free_wires=2),
... )
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──H─╭C───────────┤ Sample[|1⟩⟨1|]
1: ────╰X──//──X─╭C─┤ Sample[|1⟩⟨1|]
2: ──────────────╰X─┤ Sample[|1⟩⟨1|]
Our next step, using the original manual cut placement, is to remove the :class:`~.WireCut`
nodes in the graph and replace with :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs.
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that
allow us to cut the circuit graph and then randomly select measurement and preparation
configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart
the graph into disconnected components as well as returning the
`communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__
detailing the connectivity between the components.
>>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph)
We now convert the ``fragments`` back to :class:`~.QuantumTape` objects
>>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments]
The circuit fragments can now be visualized:
>>> print(fragment_tapes[0].draw())
0: ──H─╭C─────────────────┤ Sample[|1⟩⟨1|]
1: ────╰X──X──MeasureNode─┤
>>> print(fragment_tapes[1].draw())
1: ──PrepareNode─╭C─┤ Sample[|1⟩⟨1|]
2: ──────────────╰X─┤ Sample[|1⟩⟨1|]
Additionally, we must remap the tape wires to match those available on our device.
>>> dev = qml.device("default.qubit", wires=2, shots=1)
>>> fragment_tapes = [
... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes
... ]
Note that the number of shots on the device is set to :math:`1` here since we
will only require one execution per fragment configuration. In the
following steps we introduce a shots value that will determine the number
of fragment configurations. When using the ``cut_circuit_mc()`` decorator
with a QNode, this shots value is automatically inferred from the provided
device.
Next, each circuit fragment is randomly expanded over :class:`~.MeasureNode` and
:class:`~.PrepareNode` configurations. For each pair, a measurement is sampled from
the Pauli basis and a state preparation is sampled from the corresponding pair of eigenstates.
A settings array is also given which tracks the configuration pairs. Since each of the 4
measurements has 2 possible eigenvectors, all configurations can be uniquely identified by
8 values. The number of rows is determined by the number of cuts and the number of columns
is determined by the number of shots.
>>> shots = 3
>>> configurations, settings = qml.transforms.qcut.expand_fragment_tapes_mc(
... fragment_tapes, communication_graph, shots=shots
... )
>>> tapes = tuple(tape for c in configurations for tape in c)
>>> settings
tensor([[6, 3, 4]], requires_grad=True)
Each configuration is drawn below:
>>> for t in tapes:
... print(t.draw())
... print("")
.. code-block::
0: ──H─╭C────┤ Sample[|1⟩⟨1|]
1: ────╰X──X─┤ Sample[Z]
0: ──H─╭C────┤ Sample[|1⟩⟨1|]
1: ────╰X──X─┤ Sample[X]
0: ──H─╭C────┤ Sample[|1⟩⟨1|]
1: ────╰X──X─┤ Sample[Y]
0: ──I─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[|1⟩⟨1|]
0: ──X──S─╭C─┤ Sample[|1⟩⟨1|]
1: ───────╰X─┤ Sample[|1⟩⟨1|]
0: ──H─╭C─┤ Sample[|1⟩⟨1|]
1: ────╰X─┤ Sample[|1⟩⟨1|]
The last step is to execute the tapes and postprocess the results using
:func:`~.qcut_processing_fn_sample`, which processes the results to approximate the original full circuit
output bitstrings.
>>> results = qml.execute(tapes, dev, gradient_fn=None)
>>> qml.transforms.qcut.qcut_processing_fn_sample(
... results,
... communication_graph,
... shots=shots,
... )
[array([[0., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]])]
Alternatively, it is possible to calculate an expectation value if a classical
processing function is provided that will accept the reconstructed circuit bitstrings
and return a value in the interval :math:`[-1, 1]`:
.. code-block::
def fn(x):
if x[0] == 0:
return 1
if x[0] == 1:
return -1
>>> qml.transforms.qcut.qcut_processing_fn_mc(
... results,
... communication_graph,
... settings,
... shots,
... fn
... )
array(-4.)
        Using the Monte Carlo approach of `Peng et al. <https://arxiv.org/abs/1904.00102>`__, the
        ``cut_circuit_mc`` transform also supports returning sample-based expectation values of
        observables that are diagonal in the computational basis, as shown below for a ``ZZ`` measurement
        on wires ``0`` and ``2``:
.. code-block::
dev = qml.device("default.qubit", wires=2, shots=10000)
def observable(bitstring):
return (-1) ** np.sum(bitstring)
@qml.cut_circuit_mc(classical_processing_fn=observable)
@qml.qnode(dev)
def circuit(x):
qml.RX(0.89, wires=0)
qml.RY(0.5, wires=1)
qml.RX(1.3, wires=2)
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=[1, 2])
qml.RX(x, wires=0)
qml.RY(0.7, wires=1)
qml.RX(2.3, wires=2)
return qml.sample(wires=[0, 2])
We can now approximate the expectation value of the observable using
>>> circuit(x)
tensor(-0.776, requires_grad=True)
"""
# pylint: disable=unused-argument, too-many-arguments
if len(tape.measurements) != 1:
raise ValueError(
"The Monte Carlo circuit cutting workflow only supports circuits "
"with a single output measurement"
)
if not all(m.return_type is Sample for m in tape.measurements):
raise ValueError(
"The Monte Carlo circuit cutting workflow only supports circuits "
"with sampling-based measurements"
)
for meas in tape.measurements:
if meas.obs is not None:
raise ValueError(
"The Monte Carlo circuit cutting workflow only "
"supports measurements in the computational basis. Please only specify "
"wires to be sampled within qml.sample(), do not pass observables."
)
g = tape_to_graph(tape)
if auto_cutter is True or callable(auto_cutter):
cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy(
max_free_wires=len(device_wires)
)
g = find_and_place_cuts(
graph=g,
cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut,
cut_strategy=cut_strategy,
**kwargs,
)
replace_wire_cut_nodes(g)
fragments, communication_graph = fragment_graph(g)
fragment_tapes = [graph_to_tape(f) for f in fragments]
fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
configurations, settings = expand_fragment_tapes_mc(
fragment_tapes, communication_graph, shots=shots
)
tapes = tuple(tape for c in configurations for tape in c)
if classical_processing_fn:
return tapes, partial(
qcut_processing_fn_mc,
communication_graph=communication_graph,
settings=settings,
shots=shots,
classical_processing_fn=classical_processing_fn,
)
return tapes, partial(
qcut_processing_fn_sample, communication_graph=communication_graph, shots=shots
)
@cut_circuit_mc.custom_qnode_wrapper
def qnode_execution_wrapper_mc(self, qnode, targs, tkwargs):
"""Here, we overwrite the QNode execution wrapper in order
to replace execution variables"""
transform_max_diff = tkwargs.pop("max_diff", None)
tkwargs.setdefault("device_wires", qnode.device.wires)
if "shots" in inspect.signature(qnode.func).parameters:
raise ValueError(
"Detected 'shots' as an argument of the quantum function to transform. "
"The 'shots' argument name is reserved for overriding the number of shots "
"taken by the device."
)
def _wrapper(*args, **kwargs):
if tkwargs.get("shots", False):
raise ValueError(
"Cannot provide a 'shots' value directly to the cut_circuit_mc "
"decorator when transforming a QNode. Please provide the number of shots in "
"the device or when calling the QNode."
)
shots = kwargs.pop("shots", False)
shots = shots or qnode.device.shots
if shots is None:
raise ValueError(
"A shots value must be provided in the device "
"or when calling the QNode to be cut"
)
qnode.construct(args, kwargs)
tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs, shots=shots)
interface = qnode.interface
execute_kwargs = getattr(qnode, "execute_kwargs", {}).copy()
max_diff = execute_kwargs.pop("max_diff", 2)
max_diff = transform_max_diff or max_diff
gradient_fn = getattr(qnode, "gradient_fn", qnode.diff_method)
gradient_kwargs = getattr(qnode, "gradient_kwargs", {})
if interface is None or not self.differentiable:
gradient_fn = None
execute_kwargs["cache"] = False
res = qml.execute(
tapes,
device=qnode.device,
gradient_fn=gradient_fn,
interface=interface,
max_diff=max_diff,
override_shots=1,
gradient_kwargs=gradient_kwargs,
**execute_kwargs,
)
out = processing_fn(res)
if isinstance(out, list) and len(out) == 1:
return out[0]
return out
return _wrapper
def _get_symbol(i):
"""Finds the i-th ASCII symbol. Works for lowercase and uppercase letters, allowing i up to
51."""
if i >= len(string.ascii_letters):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(string.ascii_letters)} wire cuts to a circuit"
)
return string.ascii_letters[i]
# pylint: disable=too-many-branches
def contract_tensors(
tensors: Sequence,
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
r"""Contract tensors according to the edges specified in the communication graph.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Consider the three tensors :math:`T^{(1)}`, :math:`T^{(2)}`, and :math:`T^{(3)}`, along with
their contraction equation
.. math::
\sum_{ijklmn} T^{(1)}_{ij,km} T^{(2)}_{kl,in} T^{(3)}_{mn,jl}
Each tensor is the result of the tomography of a circuit fragment and has some indices
corresponding to state preparations (marked by the indices before the comma) and some indices
corresponding to measurements (marked by the indices after the comma).
An equivalent representation of the contraction equation is to use a directed multigraph known
as the communication/quotient graph. In the communication graph, each tensor is assigned a node
and edges are added between nodes to mark a contraction along an index. The communication graph
resulting from the above contraction equation is a complete directed graph.
In the communication graph provided by :func:`fragment_graph`, edges are composed of
:class:`PrepareNode` and :class:`MeasureNode` pairs. To correctly map back to the contraction
equation, we must keep track of the order of preparation and measurement indices in each tensor.
This order is specified in the ``prepare_nodes`` and ``measure_nodes`` arguments.
Args:
tensors (Sequence): the tensors to be contracted
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between the tensors
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of preparation indices in
each tensor
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of measurement indices in
each tensor
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
Returns:
float or tensor_like: the result of contracting the tensor network
**Example**
We first set up the tensors and their corresponding :class:`~.PrepareNode` and
:class:`~.MeasureNode` orderings:
.. code-block:: python
from pennylane.transforms import qcut
import networkx as nx
import numpy as np
tensors = [np.arange(4), np.arange(4, 8)]
prep = [[], [qcut.PrepareNode(wires=0)]]
meas = [[qcut.MeasureNode(wires=0)], []]
The communication graph describing edges in the tensor network must also be constructed:
.. code-block:: python
graph = nx.MultiDiGraph([(0, 1, {"pair": (meas[0][0], prep[1][0])})])
The network can then be contracted using:
>>> qml.transforms.qcut.contract_tensors(tensors, graph, prep, meas)
38
"""
# pylint: disable=import-outside-toplevel
if use_opt_einsum:
try:
from opt_einsum import contract, get_symbol
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the contract_tensors function. This package can be "
"installed using:\npip install opt_einsum"
) from e
else:
contract = qml.math.einsum
get_symbol = _get_symbol
ctr = 0
tensor_indxs = [""] * len(communication_graph.nodes)
meas_map = {}
for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)):
predecessors = communication_graph.pred[node]
for p in prep:
for _, pred_edges in predecessors.items():
for pred_edge in pred_edges.values():
meas_op, prep_op = pred_edge["pair"]
if p.id is prep_op.id:
symb = get_symbol(ctr)
ctr += 1
tensor_indxs[i] += symb
meas_map[meas_op] = symb
for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)):
successors = communication_graph.succ[node]
for m in meas:
for _, succ_edges in successors.items():
for succ_edge in succ_edges.values():
meas_op, _ = succ_edge["pair"]
if m.id is meas_op.id:
symb = meas_map[meas_op]
tensor_indxs[i] += symb
eqn = ",".join(tensor_indxs)
kwargs = {} if use_opt_einsum else {"like": tensors[0]}
return contract(eqn, *tensors, **kwargs)
CHANGE_OF_BASIS = qml.math.array(
[[1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 2.0, 0.0], [-1.0, -1.0, 0.0, 2.0], [1.0, -1.0, 0.0, 0.0]]
)
def _process_tensor(results, n_prep: int, n_meas: int):
"""Convert a flat slice of an individual circuit fragment's execution results into a tensor.
This function performs the following steps:
1. Reshapes ``results`` into the intermediate shape ``(4,) * n_prep + (4**n_meas,)``
2. Shuffles the final axis to follow the standard product over measurement settings. E.g., for
``n_meas = 2`` the standard product is: II, IX, IY, IZ, XI, ..., ZY, ZZ while the input order
will be the result of ``qml.grouping.partition_pauli_group(2)``, i.e., II, IZ, ZI, ZZ, ...,
YY.
3. Reshapes into the final target shape ``(4,) * (n_prep + n_meas)``
4. Performs a change of basis for the preparation indices (the first ``n_prep`` indices) from
the |0>, |1>, |+>, |+i> basis to the I, X, Y, Z basis using ``CHANGE_OF_BASIS``.
Args:
results (tensor_like): the input execution results
n_prep (int): the number of preparation nodes in the corresponding circuit fragment
n_meas (int): the number of measurement nodes in the corresponding circuit fragment
Returns:
tensor_like: the corresponding fragment tensor
"""
n = n_prep + n_meas
dim_meas = 4**n_meas
# Step 1
intermediate_shape = (4,) * n_prep + (dim_meas,)
intermediate_tensor = qml.math.reshape(results, intermediate_shape)
# Step 2
grouped = qml.grouping.partition_pauli_group(n_meas)
grouped_flat = [term for group in grouped for term in group]
order = qml.math.argsort(grouped_flat)
if qml.math.get_interface(intermediate_tensor) == "tensorflow":
# TensorFlow does not support slicing
intermediate_tensor = qml.math.gather(intermediate_tensor, order, axis=-1)
else:
sl = [slice(None)] * n_prep + [order]
intermediate_tensor = intermediate_tensor[tuple(sl)]
# Step 3
final_shape = (4,) * n
final_tensor = qml.math.reshape(intermediate_tensor, final_shape)
# Step 4
change_of_basis = qml.math.convert_like(CHANGE_OF_BASIS, intermediate_tensor)
for i in range(n_prep):
axes = [[1], [i]]
final_tensor = qml.math.tensordot(change_of_basis, final_tensor, axes=axes)
axes = list(reversed(range(n_prep))) + list(range(n_prep, n))
# Use transpose to reorder indices. We must do this because tensordot returns a tensor whose
# indices are ordered according to the uncontracted indices of the first tensor, followed
# by the uncontracted indices of the second tensor. For example, calculating C_kj T_ij returns
# a tensor T'_ki rather than T'_ik.
final_tensor = qml.math.transpose(final_tensor, axes=axes)
final_tensor *= qml.math.power(2, -(n_meas + n_prep) / 2)
return final_tensor
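# Size illustration for ``_process_tensor``: a fragment with ``n_prep = 1`` and ``n_meas = 2``
# supplies 4 * 4**2 = 64 expectation values, reshaped to (4, 16), reordered along the last
# axis into the standard Pauli product order, reshaped to (4, 4, 4), rotated into the
# I, X, Y, Z basis on the preparation index and rescaled by 2**(-3/2).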
def _to_tensors(
results,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
) -> List:
"""Process a flat list of execution results from all circuit fragments into the corresponding
tensors.
This function slices ``results`` according to the expected size of fragment tensors derived from
    the ``prepare_nodes`` and ``measure_nodes`` and then passes each slice on to ``_process_tensor``
    for further transformation.
Args:
results (tensor_like): A collection of execution results, provided as a flat tensor,
corresponding to the expansion of circuit fragments in the communication graph over
measurement and preparation node configurations. These results are processed into
tensors by this function.
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence whose length is equal to the
number of circuit fragments, with each element used here to determine the number of
preparation nodes in a given fragment
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence whose length is equal to the
number of circuit fragments, with each element used here to determine the number of
measurement nodes in a given fragment
Returns:
List[tensor_like]: the tensors for each circuit fragment in the communication graph
"""
ctr = 0
tensors = []
for p, m in zip(prepare_nodes, measure_nodes):
n_prep = len(p)
n_meas = len(m)
n = n_prep + n_meas
dim = 4**n
results_slice = results[ctr : dim + ctr]
tensors.append(_process_tensor(results_slice, n_prep, n_meas))
ctr += dim
if results.shape[0] != ctr:
raise ValueError(f"The results argument should be a flat list of length {ctr}")
return tensors
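# Illustrative sketch: for two fragments with (n_prep, n_meas) = (0, 1) and
# (1, 1), the expected slice sizes are 4**1 = 4 and 4**2 = 16, so ``results``
# must be a flat tensor of length 20. The first 4 entries become a shape-(4,)
# tensor for the first fragment and the next 16 a shape-(4, 4) tensor for the
# second; any leftover entries trigger the ValueError above.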
def qcut_processing_fn(
results: Sequence[Sequence],
communication_graph: MultiDiGraph,
prepare_nodes: Sequence[Sequence[PrepareNode]],
measure_nodes: Sequence[Sequence[MeasureNode]],
use_opt_einsum: bool = False,
):
"""Processing function for the :func:`cut_circuit() <pennylane.cut_circuit>` transform.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
results (Sequence[Sequence]): A collection of execution results generated from the
expansion of circuit fragments over measurement and preparation node configurations.
These results are processed into tensors and then contracted.
communication_graph (nx.MultiDiGraph): the communication graph determining connectivity
between circuit fragments
prepare_nodes (Sequence[Sequence[PrepareNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of preparation indices in
each tensor
measure_nodes (Sequence[Sequence[MeasureNode]]): a sequence of size
``len(communication_graph.nodes)`` that determines the order of measurement indices in
each tensor
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
Returns:
float or tensor_like: the output of the original uncut circuit arising from contracting
the tensor network of circuit fragments
"""
flat_results = qml.math.concatenate(results)
tensors = _to_tensors(flat_results, prepare_nodes, measure_nodes)
result = contract_tensors(
tensors, communication_graph, prepare_nodes, measure_nodes, use_opt_einsum
)
return result
@batch_transform
def cut_circuit(
tape: QuantumTape,
auto_cutter: Union[bool, Callable] = False,
use_opt_einsum: bool = False,
device_wires: Optional[Wires] = None,
max_depth: int = 1,
**kwargs,
) -> Tuple[Tuple[QuantumTape], Callable]:
"""
Cut up a quantum circuit into smaller circuit fragments.
Following the approach outlined in Theorem 2 of
`Peng et al. <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.150504>`__,
strategic placement of :class:`~.WireCut` operations can allow a quantum circuit to be split
into disconnected circuit fragments. Each circuit fragment is then executed multiple times by
varying the state preparations and measurements at incoming and outgoing cut locations,
respectively, resulting in a process tensor describing the action of the fragment. The process
tensors are then contracted to provide the result of the original uncut circuit.
.. note::
Only circuits that return a single expectation value are supported.
Args:
tape (QuantumTape): the tape of the full circuit to be cut
auto_cutter (Union[bool, Callable]): Toggle for enabling automatic cutting with the default
:func:`~.kahypar_cut` partition method. Can also pass a graph partitioning function that
takes an input graph and returns a list of edges to be cut based on a given set of
constraints and objective. The default :func:`~.kahypar_cut` function requires KaHyPar to
be installed using ``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from source for Windows users.
use_opt_einsum (bool): Determines whether to use the
`opt_einsum <https://dgasmith.github.io/opt_einsum/>`__ package. This package is useful
for faster tensor contractions of large networks but must be installed separately using,
e.g., ``pip install opt_einsum``. Both settings for ``use_opt_einsum`` result in a
differentiable contraction.
device_wires (Wires): Wires of the device that the cut circuits are to be run on.
When transforming a QNode, this argument is optional and will be set to the
QNode's device wires. Required when transforming a tape.
max_depth (int): The maximum depth used to expand the circuit while searching for wire cuts.
Only applicable when transforming a QNode.
kwargs: Additional keyword arguments to be passed to a callable ``auto_cutter`` argument.
For the default KaHyPar cutter, please refer to the docstring of functions
:func:`~.find_and_place_cuts` and :func:`~.kahypar_cut` for the available arguments.
Returns:
Callable: Function which accepts the same arguments as the QNode.
When called, this function will perform a process tomography of the
partitioned circuit fragments and combine the results via tensor
contractions.
**Example**
The following :math:`3`-qubit circuit contains a :class:`~.WireCut` operation. When decorated
with ``@qml.cut_circuit``, we can cut the circuit into two :math:`2`-qubit fragments:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.cut_circuit
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.WireCut(wires=1)
qml.CZ(wires=[1, 2])
return qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
Executing ``circuit`` will run multiple configurations of the :math:`2`-qubit fragments which
are then postprocessed to give the result of the original circuit:
>>> x = np.array(0.531, requires_grad=True)
>>> circuit(x)
0.47165198882111165
    Furthermore, the output of the cut circuit is also differentiable:
>>> qml.grad(circuit)(x)
-0.276982865449393
Alternatively, if the optimal wire-cut placement is unknown for an arbitrary circuit, the
    ``auto_cutter`` option can be enabled to attempt to find such an optimal cut. The
    following example shows this capability on the same circuit as above but with the
:class:`~.WireCut` removed:
.. code-block:: python
@qml.cut_circuit(auto_cutter=True)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.CZ(wires=[1, 2])
return qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> x = np.array(0.531, requires_grad=True)
>>> circuit(x)
0.47165198882111165
>>> qml.grad(circuit)(x)
-0.276982865449393
.. UsageDetails::
Manually placing :class:`~.WireCut` operations and decorating the QNode with the
``cut_circuit()`` batch transform is the suggested entrypoint into circuit cutting. However,
advanced users also have the option to work directly with a :class:`~.QuantumTape` and
manipulate the tape to perform circuit cutting using the below functionality:
.. autosummary::
:toctree:
~transforms.qcut.tape_to_graph
~transforms.qcut.find_and_place_cuts
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tape
~transforms.qcut.qcut_processing_fn
~transforms.qcut.CutStrategy
The following shows how these elementary steps are combined as part of the
``cut_circuit()`` transform.
Consider the circuit below:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.531, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.WireCut(wires=1)
qml.CZ(wires=[1, 2])
qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> print(tape.draw())
0: ──RX(0.531)──╭C──RY(-0.4)──────╭┤ ⟨Z ⊗ Z ⊗ Z⟩
1: ──RY(0.9)────╰Z──//────────╭C──├┤ ⟨Z ⊗ Z ⊗ Z⟩
2: ──RX(0.3)──────────────────╰Z──╰┤ ⟨Z ⊗ Z ⊗ Z⟩
To cut the circuit, we first convert it to its graph representation:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
.. figure:: ../../_static/qcut_graph.svg
:align: center
:width: 60%
:target: javascript:void(0);
If, however, the optimal location of the :class:`~.WireCut` is unknown, we can use
        :func:`~.find_and_place_cuts` to attempt to automatically find such a cut
given the device constraints. Using the same circuit as above but with the
:class:`~.WireCut` removed, the same (optimal) cut can be recovered with automatic
cutting:
.. code-block:: python
with qml.tape.QuantumTape() as uncut_tape:
qml.RX(0.531, wires=0)
qml.RY(0.9, wires=1)
qml.RX(0.3, wires=2)
qml.CZ(wires=[0, 1])
qml.RY(-0.4, wires=0)
qml.CZ(wires=[1, 2])
qml.expval(qml.grouping.string_to_pauli_word("ZZZ"))
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph = qml.transforms.qcut.tape_to_graph(uncut_tape),
cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2),
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX─╭C──RY────┤ ╭<Z@Z@Z>
1: ──RY─╰Z──//─╭C─┤ ├<Z@Z@Z>
2: ──RX────────╰Z─┤ ╰<Z@Z@Z>
        Our next step is to remove the :class:`~.WireCut` nodes in the graph and replace them
        with :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs.
>>> qml.transforms.qcut.replace_wire_cut_nodes(graph)
The :class:`~.MeasureNode` and :class:`~.PrepareNode` pairs are placeholder operations that
allow us to cut the circuit graph and then iterate over measurement and preparation
configurations at cut locations. First, the :func:`~.fragment_graph` function pulls apart
the graph into disconnected components as well as returning the
`communication_graph <https://en.wikipedia.org/wiki/Quotient_graph>`__
detailing the connectivity between the components.
>>> fragments, communication_graph = qml.transforms.qcut.fragment_graph(graph)
        We now convert the ``fragments`` back to :class:`~.QuantumTape` objects:
>>> fragment_tapes = [qml.transforms.qcut.graph_to_tape(f) for f in fragments]
The circuit fragments can now be visualized:
>>> print(fragment_tapes[0].draw())
0: ──RX(0.531)──╭C──RY(-0.4)─────┤ ⟨Z⟩
1: ──RY(0.9)────╰Z──MeasureNode──┤
>>> print(fragment_tapes[1].draw())
2: ──RX(0.3)──────╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──PrepareNode──╰C──╰┤ ⟨Z ⊗ Z⟩
Additionally, we must remap the tape wires to match those available on our device.
>>> dev = qml.device("default.qubit", wires=2)
>>> fragment_tapes = [
... qml.transforms.qcut.remap_tape_wires(t, dev.wires) for t in fragment_tapes
... ]
Next, each circuit fragment is expanded over :class:`~.MeasureNode` and
:class:`~.PrepareNode` configurations and a flat list of tapes is created:
.. code-block::
expanded = [qml.transforms.qcut.expand_fragment_tape(t) for t in fragment_tapes]
configurations = []
prepare_nodes = []
measure_nodes = []
for tapes, p, m in expanded:
configurations.append(tapes)
prepare_nodes.append(p)
measure_nodes.append(m)
tapes = tuple(tape for c in configurations for tape in c)
Each configuration is drawn below:
>>> for t in tapes:
... print(t.draw())
.. code-block::
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ I⟩ ╭┤ ⟨Z ⊗ Z⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ I⟩ ╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ X⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ X⟩
0: ──RX(0.531)──╭C──RY(-0.4)──╭┤ ⟨Z ⊗ Y⟩
1: ──RY(0.9)────╰Z────────────╰┤ ⟨Z ⊗ Y⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──I────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──X────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)──╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──H────────╰C──╰┤ ⟨Z ⊗ Z⟩
0: ──RX(0.3)─────╭Z──╭┤ ⟨Z ⊗ Z⟩
1: ──H────────S──╰C──╰┤ ⟨Z ⊗ Z⟩
The last step is to execute the tapes and postprocess the results using
        :func:`~.qcut_processing_fn`, which processes the results into the original full circuit
        output via a tensor network contraction:
>>> results = qml.execute(tapes, dev, gradient_fn=None)
>>> qml.transforms.qcut.qcut_processing_fn(
... results,
... communication_graph,
... prepare_nodes,
... measure_nodes,
... )
0.47165198882111165
"""
# pylint: disable=unused-argument
if len(tape.measurements) != 1:
raise ValueError(
"The circuit cutting workflow only supports circuits with a single output "
"measurement"
)
if not all(m.return_type is Expectation for m in tape.measurements):
raise ValueError(
"The circuit cutting workflow only supports circuits with expectation "
"value measurements"
)
if use_opt_einsum:
try:
import opt_einsum # pylint: disable=import-outside-toplevel,unused-import
except ImportError as e:
raise ImportError(
"The opt_einsum package is required when use_opt_einsum is set to "
"True in the cut_circuit function. This package can be "
"installed using:\npip install opt_einsum"
) from e
g = tape_to_graph(tape)
if auto_cutter is True or callable(auto_cutter):
cut_strategy = kwargs.pop("cut_strategy", None) or CutStrategy(
max_free_wires=len(device_wires)
)
g = find_and_place_cuts(
graph=g,
cut_method=auto_cutter if callable(auto_cutter) else kahypar_cut,
cut_strategy=cut_strategy,
**kwargs,
)
replace_wire_cut_nodes(g)
fragments, communication_graph = fragment_graph(g)
fragment_tapes = [graph_to_tape(f) for f in fragments]
fragment_tapes = [remap_tape_wires(t, device_wires) for t in fragment_tapes]
expanded = [expand_fragment_tape(t) for t in fragment_tapes]
configurations = []
prepare_nodes = []
measure_nodes = []
for tapes, p, m in expanded:
configurations.append(tapes)
prepare_nodes.append(p)
measure_nodes.append(m)
tapes = tuple(tape for c in configurations for tape in c)
return tapes, partial(
qcut_processing_fn,
communication_graph=communication_graph,
prepare_nodes=prepare_nodes,
measure_nodes=measure_nodes,
use_opt_einsum=use_opt_einsum,
)
@cut_circuit.custom_qnode_wrapper
def qnode_execution_wrapper(self, qnode, targs, tkwargs):
"""Here, we overwrite the QNode execution wrapper in order
to access the device wires."""
# pylint: disable=function-redefined
tkwargs.setdefault("device_wires", qnode.device.wires)
return self.default_qnode_wrapper(qnode, targs, tkwargs)
def _qcut_expand_fn(
tape: QuantumTape,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
):
"""Expansion function for circuit cutting.
Expands operations until reaching a depth that includes :class:`~.WireCut` operations.
"""
for op in tape.operations:
if isinstance(op, WireCut):
return tape
if max_depth > 0:
return _qcut_expand_fn(tape.expand(), max_depth=max_depth - 1, auto_cutter=auto_cutter)
if not (auto_cutter is True or callable(auto_cutter)):
raise ValueError(
"No WireCut operations found in the circuit. Consider increasing the max_depth value if"
" operations or nested tapes contain WireCut operations."
)
return tape
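# Illustrative note: a WireCut sitting one level down (e.g. inside a nested tape
# or template) is exposed after a single ``tape.expand()`` call, which is why the
# default ``max_depth=1`` is usually sufficient. If no WireCut is found at any
# allowed depth and ``auto_cutter`` is not enabled, the ValueError above is raised.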
def _cut_circuit_expand(
tape: QuantumTape,
use_opt_einsum: bool = False,
device_wires: Optional[Wires] = None,
max_depth: int = 1,
auto_cutter: Union[bool, Callable] = False,
**kwargs,
):
"""Main entry point for expanding operations until reaching a depth that
includes :class:`~.WireCut` operations."""
# pylint: disable=unused-argument
return _qcut_expand_fn(tape, max_depth, auto_cutter)
def _cut_circuit_mc_expand(
tape: QuantumTape,
classical_processing_fn: Optional[callable] = None,
max_depth: int = 1,
shots: Optional[int] = None,
device_wires: Optional[Wires] = None,
auto_cutter: Union[bool, Callable] = False,
**kwargs,
):
"""Main entry point for expanding operations in sample-based tapes until
reaching a depth that includes :class:`~.WireCut` operations."""
# pylint: disable=unused-argument, too-many-arguments
return _qcut_expand_fn(tape, max_depth, auto_cutter)
cut_circuit.expand_fn = _cut_circuit_expand
cut_circuit_mc.expand_fn = _cut_circuit_mc_expand
def remap_tape_wires(tape: QuantumTape, wires: Sequence) -> QuantumTape:
"""Map the wires of a tape to a new set of wires.
Given an :math:`n`-wire ``tape``, this function returns a new :class:`~.QuantumTape` with
operations and measurements acting on the first :math:`n` wires provided in the ``wires``
argument. The input ``tape`` is left unmodified.
.. note::
This function is designed for use as part of the circuit cutting workflow.
Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
tape (QuantumTape): the quantum tape whose wires should be remapped
wires (Sequence): the new set of wires to map to
Returns:
QuantumTape: A remapped copy of the input tape
Raises:
ValueError: if the number of wires in ``tape`` exceeds ``len(wires)``
**Example**
Consider the following circuit that operates on wires ``[2, 3]``:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.5, wires=2)
qml.RY(0.6, wires=3)
qml.CNOT(wires=[2, 3])
qml.expval(qml.PauliZ(2) @ qml.PauliZ(3))
We can map from wires ``[2, 3]`` to ``[0, 1]`` using:
>>> new_wires = [0, 1]
>>> new_tape = qml.transforms.qcut.remap_tape_wires(tape, new_wires)
>>> print(new_tape.draw())
0: ──RX(0.5)──╭C──╭┤ ⟨Z ⊗ Z⟩
1: ──RY(0.6)──╰X──╰┤ ⟨Z ⊗ Z⟩
"""
if len(tape.wires) > len(wires):
raise ValueError(
f"Attempting to run a {len(tape.wires)}-wire circuit on a "
f"{len(wires)}-wire device. Consider increasing the number of wires in "
f"your device."
)
wire_map = dict(zip(tape.wires, wires))
copy_ops = [copy.copy(op) for op in tape.operations]
copy_meas = [copy.copy(op) for op in tape.measurements]
with QuantumTape() as new_tape:
for op in copy_ops:
new_wires = Wires([wire_map[w] for w in op.wires])
op._wires = new_wires
apply(op)
for meas in copy_meas:
obs = meas.obs
if isinstance(obs, Tensor):
for obs in obs.obs:
new_wires = Wires([wire_map[w] for w in obs.wires])
obs._wires = new_wires
else:
new_wires = Wires([wire_map[w] for w in obs.wires])
obs._wires = new_wires
apply(meas)
return new_tape
@dataclass()
class CutStrategy:
"""
A circuit-cutting distribution policy for executing (large) circuits on available (comparably
smaller) devices.
.. note::
This class is part of a work-in-progress feature to support automatic cut placement in the
        circuit cutting workflow. Currently only manual placement of cuts is supported;
        check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
devices (Union[qml.Device, Sequence[qml.Device]]): Single, or Sequence of, device(s).
Optional only when ``max_free_wires`` is provided.
max_free_wires (int): Number of wires for the largest available device. Optional only when
``devices`` is provided where it defaults to the maximum number of wires among
``devices``.
min_free_wires (int): Number of wires for the smallest available device, or, equivalently,
the smallest max fragment-wire-size that the partitioning is allowed to explore.
When provided, this parameter will be used to derive an upper-bound to the range of
explored number of fragments. Optional, defaults to 2 which corresponds to attempting
the most granular partitioning of max 2-wire fragments.
num_fragments_probed (Union[int, Sequence[int]]): Single, or 2-Sequence of, number(s)
specifying the potential (range of) number of fragments for the partitioner to attempt.
Optional, defaults to probing all valid strategies derivable from the circuit and
devices. When provided, has precedence over all other arguments affecting partitioning
exploration, such as ``max_free_wires``, ``min_free_wires``, or ``exhaustive``.
max_free_gates (int): Maximum allowed circuit depth for the deepest available device.
Optional, defaults to unlimited depth.
min_free_gates (int): Maximum allowed circuit depth for the shallowest available device.
Optional, defaults to ``max_free_gates``.
imbalance_tolerance (float): The global maximum allowed imbalance for all partition trials.
Optional, defaults to unlimited imbalance. Used only if there's a known hard balancing
constraint on the partitioning problem.
trials_per_probe (int): Number of repeated partitioning trials for a random automatic
cutting method to attempt per set of partitioning parameters. For a deterministic
cutting method, this can be set to 1. Defaults to 4.
**Example**
The following cut strategy specifies that a circuit should be cut into between
``2`` to ``5`` fragments, with each fragment having at most ``6`` wires and
at least ``4`` wires:
>>> cut_strategy = qml.transforms.CutStrategy(
... max_free_wires=6,
... min_free_wires=4,
... num_fragments_probed=(2, 5),
... )
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes
#: Initialization argument only, used to derive ``max_free_wires`` and ``min_free_wires``.
devices: InitVar[Union[qml.Device, Sequence[qml.Device]]] = None
#: Number of wires for the largest available device.
max_free_wires: int = None
#: Number of wires for the smallest available device.
min_free_wires: int = None
#: The potential (range of) number of fragments for the partitioner to attempt.
num_fragments_probed: Union[int, Sequence[int]] = None
#: Maximum allowed circuit depth for the deepest available device.
max_free_gates: int = None
#: Maximum allowed circuit depth for the shallowest available device.
min_free_gates: int = None
#: The global maximum allowed imbalance for all partition trials.
imbalance_tolerance: float = None
#: Number of trials to repeat for per set of partition parameters probed.
trials_per_probe: int = 4
#: Class attribute, threshold for warning about too many fragments.
HIGH_NUM_FRAGMENTS: ClassVar[int] = 20
#: Class attribute, threshold for warning about too many partition attempts.
HIGH_PARTITION_ATTEMPTS: ClassVar[int] = 20
def __post_init__(
self,
devices,
):
"""Deriving cutting constraints from given devices and parameters."""
self.max_free_wires = self.max_free_wires
if isinstance(self.num_fragments_probed, int):
self.num_fragments_probed = [self.num_fragments_probed]
if isinstance(self.num_fragments_probed, (list, tuple)):
self.num_fragments_probed = sorted(self.num_fragments_probed)
self.k_lower = self.num_fragments_probed[0]
self.k_upper = self.num_fragments_probed[-1]
if self.k_lower <= 0:
raise ValueError("`num_fragments_probed` must be positive int(s)")
else:
self.k_lower, self.k_upper = None, None
if devices is None and self.max_free_wires is None:
raise ValueError("One of arguments `devices` and max_free_wires` must be provided.")
if isinstance(devices, qml.Device):
devices = (devices,)
if devices is not None:
if not isinstance(devices, SequenceType) or any(
(not isinstance(d, qml.Device) for d in devices)
):
raise ValueError(
"Argument `devices` must be a list or tuple containing elements of type "
"`qml.Device`"
)
device_wire_sizes = [len(d.wires) for d in devices]
self.max_free_wires = self.max_free_wires or max(device_wire_sizes)
self.min_free_wires = self.min_free_wires or min(device_wire_sizes)
if (self.imbalance_tolerance is not None) and not (
isinstance(self.imbalance_tolerance, (float, int)) and self.imbalance_tolerance >= 0
):
raise ValueError(
"The overall `imbalance_tolerance` is expected to be a non-negative number, "
f"got {type(self.imbalance_tolerance)} with value {self.imbalance_tolerance}."
)
self.min_free_wires = self.min_free_wires or 1
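    # Illustrative sketch (``dev_a`` and ``dev_b`` are hypothetical devices with
    # 5 and 3 wires, respectively): ``CutStrategy(devices=[dev_a, dev_b])`` derives
    # ``max_free_wires=5`` and ``min_free_wires=3``, while additionally passing
    # ``num_fragments_probed=(2, 4)`` pins ``k_lower=2`` and ``k_upper=4``.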
def get_cut_kwargs(
self,
tape_dag: MultiDiGraph,
max_wires_by_fragment: Sequence[int] = None,
max_gates_by_fragment: Sequence[int] = None,
exhaustive: bool = True,
) -> List[Dict[str, Any]]:
"""Derive the complete set of arguments, based on a given circuit, for passing to a graph
partitioner.
Args:
tape_dag (nx.MultiDiGraph): Graph representing a tape, typically the output of
:func:`tape_to_graph`.
max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially
valid numbers of fragments into which the circuit is partitioned. If ``True``,
for a circuit with N gates, N - 1 attempts will be made with ``num_fragments``
ranging from [2, N], i.e. from bi-partitioning to complete partitioning where each
fragment has exactly a single gate. Defaults to ``True``.
Returns:
List[Dict[str, Any]]: A list of minimal kwargs being passed to a graph
partitioner method.
**Example**
Deriving kwargs for a given circuit and feeding them to a custom partitioner, along with
extra parameters specified using ``extra_kwargs``:
>>> cut_strategy = qcut.CutStrategy(devices=dev)
>>> cut_kwargs = cut_strategy.get_cut_kwargs(tape_dag)
>>> cut_trials = [
... my_partition_fn(tape_dag, **kwargs, **extra_kwargs) for kwargs in cut_kwargs
... ]
"""
tape_wires = set(w for _, _, w in tape_dag.edges.data("wire"))
num_tape_wires = len(tape_wires)
num_tape_gates = sum(not isinstance(n, WireCut) for n in tape_dag.nodes)
self._validate_input(max_wires_by_fragment, max_gates_by_fragment)
probed_cuts = self._infer_probed_cuts(
num_tape_wires=num_tape_wires,
num_tape_gates=num_tape_gates,
max_wires_by_fragment=max_wires_by_fragment,
max_gates_by_fragment=max_gates_by_fragment,
exhaustive=exhaustive,
)
return probed_cuts
@staticmethod
def _infer_imbalance(
k, num_wires, num_gates, free_wires, free_gates, imbalance_tolerance=None
) -> float:
"""Helper function for determining best imbalance limit."""
avg_fragment_wires = (num_wires - 1) // k + 1
avg_fragment_gates = (num_gates - 1) // k + 1
if free_wires < avg_fragment_wires:
raise ValueError(
"`free_wires` should be no less than the average number of wires per fragment. "
f"Got {free_wires} >= {avg_fragment_wires} ."
)
if free_gates < avg_fragment_gates:
raise ValueError(
"`free_gates` should be no less than the average number of gates per fragment. "
f"Got {free_gates} >= {avg_fragment_gates} ."
)
if free_gates > num_gates - k:
            # Case where gate depth is not limited (`-k` since each fragment has to have >= 1 gate):
free_gates = num_gates
        # A small adjustment is added to the imbalance factor to prevent small ks from resulting
        # in extremely unbalanced fragments. It will heuristically force the smallest fragment size
        # to be >= 3 if the average fragment size is greater than 5. In other words, tiny fragments
        # are only allowed when the average fragment size is small in the first place.
balancing_adjustment = 2 if avg_fragment_gates > 5 else 0
free_gates = free_gates - (k - 1 + balancing_adjustment)
gate_imbalance = free_gates / avg_fragment_gates - 1
imbalance = max(gate_imbalance, 0.1 / avg_fragment_gates) # numerical stability
if imbalance_tolerance is not None:
imbalance = min(imbalance, imbalance_tolerance)
return imbalance
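    # Worked example (hypothetical numbers): _infer_imbalance(k=2, num_wires=4,
    # num_gates=10, free_wires=2, free_gates=10) gives avg_fragment_wires = 2 and
    # avg_fragment_gates = 5; free_gates (10) exceeds num_gates - k (8) so it is
    # reset to 10, the balancing adjustment is 0 (since 5 > 5 is false), leaving
    # free_gates = 10 - (2 - 1) = 9 and an imbalance of 9 / 5 - 1 = 0.8, which is
    # then capped by ``imbalance_tolerance`` if one was provided.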
@staticmethod
def _validate_input(
max_wires_by_fragment,
max_gates_by_fragment,
):
"""Helper parameter checker."""
if max_wires_by_fragment is not None:
if not isinstance(max_wires_by_fragment, (list, tuple)):
raise ValueError(
"`max_wires_by_fragment` is expected to be a list or tuple, but got "
f"{type(max_gates_by_fragment)}."
)
if any(not (isinstance(i, int) and i > 0) for i in max_wires_by_fragment):
raise ValueError(
"`max_wires_by_fragment` is expected to contain positive integers only."
)
if max_gates_by_fragment is not None:
if not isinstance(max_gates_by_fragment, (list, tuple)):
raise ValueError(
"`max_gates_by_fragment` is expected to be a list or tuple, but got "
f"{type(max_gates_by_fragment)}."
)
if any(not (isinstance(i, int) and i > 0) for i in max_gates_by_fragment):
raise ValueError(
"`max_gates_by_fragment` is expected to contain positive integers only."
)
if max_wires_by_fragment is not None and max_gates_by_fragment is not None:
if len(max_wires_by_fragment) != len(max_gates_by_fragment):
raise ValueError(
"The lengths of `max_wires_by_fragment` and `max_gates_by_fragment` should be "
f"equal, but got {len(max_wires_by_fragment)} and {len(max_gates_by_fragment)}."
)
def _infer_probed_cuts(
self,
num_tape_wires,
num_tape_gates,
max_wires_by_fragment=None,
max_gates_by_fragment=None,
exhaustive=True,
) -> List[Dict[str, Any]]:
"""
Helper function for deriving the minimal set of best default partitioning constraints
for the graph partitioner.
Args:
num_tape_wires (int): Number of wires in the circuit tape to be partitioned.
num_tape_gates (int): Number of gates in the circuit tape to be partitioned.
max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially
valid numbers of fragments into which the circuit is partitioned. If ``True``,
``num_tape_gates - 1`` attempts will be made with ``num_fragments`` ranging from
[2, ``num_tape_gates``], i.e. from bi-partitioning to complete partitioning where
each fragment has exactly a single gate. Defaults to ``True``.
Returns:
List[Dict[str, Any]]: A list of minimal set of kwargs being passed to a graph
partitioner method.
"""
# Assumes unlimited width/depth if not supplied.
max_free_wires = self.max_free_wires or num_tape_wires
max_free_gates = self.max_free_gates or num_tape_gates
# Assumes same number of wires/gates across all devices if min_free_* not provided.
min_free_wires = self.min_free_wires or max_free_wires
min_free_gates = self.min_free_gates or max_free_gates
# The lower bound of k corresponds to executing each fragment on the largest available
# device.
k_lb = 1 + max(
(num_tape_wires - 1) // max_free_wires, # wire limited
(num_tape_gates - 1) // max_free_gates, # gate limited
)
# The upper bound of k corresponds to executing each fragment on the smallest available
# device.
k_ub = 1 + max(
(num_tape_wires - 1) // min_free_wires, # wire limited
(num_tape_gates - 1) // min_free_gates, # gate limited
)
if exhaustive:
k_lb = max(2, k_lb)
k_ub = num_tape_gates
# The global imbalance tolerance, if not given, defaults to a very loose upper bound:
imbalance_tolerance = k_ub if self.imbalance_tolerance is None else self.imbalance_tolerance
probed_cuts = []
if max_gates_by_fragment is None and max_wires_by_fragment is None:
            # k_lower, when supplied by a user, can be higher than k_lb if the desired k is known:
k_lower = self.k_lower if self.k_lower is not None else k_lb
# k_upper, when supplied by a user, can be higher than k_ub to encourage exploration:
k_upper = self.k_upper if self.k_upper is not None else k_ub
if k_lower < k_lb:
warnings.warn(
f"The provided `k_lower={k_lower}` is less than the lowest allowed value, "
f"will override and set `k_lower={k_lb}`."
)
k_lower = k_lb
if k_lower > self.HIGH_NUM_FRAGMENTS:
warnings.warn(
f"The attempted number of fragments seems high with lower bound at {k_lower}."
)
# Prepare the list of ks to explore:
ks = list(range(k_lower, k_upper + 1))
if len(ks) > self.HIGH_PARTITION_ATTEMPTS:
warnings.warn(f"The numer of partition attempts seems high ({len(ks)}).")
else:
# When the by-fragment wire and/or gate limits are supplied, derive k and imbalance and
# return a single partition config.
ks = [len(max_wires_by_fragment or max_gates_by_fragment)]
for k in ks:
imbalance = self._infer_imbalance(
k,
num_tape_wires,
num_tape_gates,
max_free_wires if max_wires_by_fragment is None else max(max_wires_by_fragment),
max_free_gates if max_gates_by_fragment is None else max(max_gates_by_fragment),
imbalance_tolerance,
)
cut_kwargs = {
"num_fragments": k,
"imbalance": imbalance,
}
if max_wires_by_fragment is not None:
cut_kwargs["max_wires_by_fragment"] = max_wires_by_fragment
if max_gates_by_fragment is not None:
cut_kwargs["max_gates_by_fragment"] = max_gates_by_fragment
probed_cuts.append(cut_kwargs)
return probed_cuts
def _graph_to_hmetis(
graph: MultiDiGraph,
hyperwire_weight: int = 0,
edge_weights: Sequence[int] = None,
) -> Tuple[List[int], List[int], List[Union[int, float]]]:
"""Converts a ``MultiDiGraph`` into the
`hMETIS hypergraph input format <http://glaros.dtc.umn.edu/gkhome/fetch/sw/hmetis/manual.pdf>`__
conforming to KaHyPar's calling signature.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires.
Defaults to 0 which leads to no such insertion. If greater than 0, hyperedges will be
appended with the provided weight, to encourage the resulting fragments to cluster gates
on the same wire together.
edge_weights (Sequence[int]): Weights for regular edges in the graph. Defaults to ``None``,
which leads to unit-weighted edges.
Returns:
Tuple[List,List,List]: The 3 lists representing an (optionally weighted) hypergraph:
- Flattened list of adjacent node indices.
- List of starting indices for edges in the above adjacent-nodes-list.
- Optional list of edge weights. ``None`` if ``hyperwire_weight`` is equal to 0.
"""
nodes = list(graph.nodes)
edges = graph.edges(data="wire")
wires = {w for _, _, w in edges}
adj_nodes = [nodes.index(v) for ops in graph.edges(keys=False) for v in ops]
edge_splits = qml.math.cumsum([0] + [len(e) for e in graph.edges(keys=False)]).tolist()
edge_weights = (
edge_weights if edge_weights is not None and len(edges) == len(edge_weights) else None
)
if hyperwire_weight:
hyperwires = {w: set() for w in wires}
num_wires = len(hyperwires)
for v0, v1, wire in edges:
hyperwires[wire].update([nodes.index(v0), nodes.index(v1)])
for wire, nodes_on_wire in hyperwires.items():
nwv = len(nodes_on_wire)
edge_splits.append(nwv + edge_splits[-1])
adj_nodes = adj_nodes + list(nodes_on_wire)
assert len(edge_splits) == len(edges) + num_wires + 1
if isinstance(hyperwire_weight, (int, float)):
# assumes original edges having unit weights by default:
edge_weights = edge_weights or ([1] * len(edges))
wire_weights = [hyperwire_weight] * num_wires
edge_weights = edge_weights + wire_weights
return adj_nodes, edge_splits, edge_weights
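# Illustrative sketch: for a 3-node path graph A -> B -> C acting on a single
# wire, the hypergraph data is adj_nodes = [0, 1, 1, 2] and edge_splits =
# [0, 2, 4]. With hyperwire_weight=1, one extra hyperedge {0, 1, 2} covering the
# wire is appended, giving adj_nodes = [0, 1, 1, 2, 0, 1, 2], edge_splits =
# [0, 2, 4, 7] and edge_weights = [1, 1, 1] (unit weights for the two original
# edges plus the hyperwire weight).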
def kahypar_cut(
graph: MultiDiGraph,
num_fragments: int,
imbalance: int = None,
edge_weights: List[Union[int, float]] = None,
node_weights: List[Union[int, float]] = None,
fragment_weights: List[Union[int, float]] = None,
hyperwire_weight: int = 1,
seed: int = None,
config_path: Union[str, Path] = None,
trial: int = None,
verbose: bool = False,
) -> List[Tuple[Operation, Operation, Any]]:
"""Calls `KaHyPar <https://kahypar.org/>`__ to partition a graph.
.. warning::
Requires KaHyPar to be installed separately. For Linux and Mac users,
KaHyPar can be installed using ``pip install kahypar``. Windows users
can follow the instructions
`here <https://kahypar.org>`__ to compile from source.
Args:
graph (nx.MultiDiGraph): The graph to be partitioned.
num_fragments (int): Desired number of fragments.
imbalance (int): Imbalance factor of the partitioning. Defaults to KaHyPar's determination.
edge_weights (List[Union[int, float]]): Weights for edges. Defaults to unit-weighted edges.
node_weights (List[Union[int, float]]): Weights for nodes. Defaults to unit-weighted nodes.
fragment_weights (List[Union[int, float]]): Maximum size constraints by fragment. Defaults
to no such constraints, with ``imbalance`` the only parameter affecting fragment sizes.
hyperwire_weight (int): Weight on the artificially appended hyperedges representing wires.
Setting it to 0 leads to no such insertion. If greater than 0, hyperedges will be
appended with the provided weight, to encourage the resulting fragments to cluster gates
on the same wire together. Defaults to 1.
seed (int): KaHyPar's seed. Defaults to the seed in the config file which defaults to -1,
i.e. unfixed seed.
config_path (str): KaHyPar's ``.ini`` config file path. Defaults to its SEA20 paper config.
trial (int): trial id for summary label creation. Defaults to ``None``.
verbose (bool): Flag for printing KaHyPar's output summary. Defaults to ``False``.
Returns:
List[Union[int, Any]]: List of cut edges.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.RZ(0.240, wires=0)
qml.RZ(0.133, wires="a")
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.expval(qml.PauliZ(wires=[0]))
We can let KaHyPar automatically find the optimal edges to place cuts:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_edges = qml.transforms.qcut.kahypar_cut(
graph=graph,
num_fragments=2,
)
>>> cut_edges
[(CNOT(wires=[0, 'a']), RZ(0.24, wires=[0]), 0)]
"""
# pylint: disable=too-many-arguments, import-outside-toplevel
try:
import kahypar
except ImportError as e:
raise ImportError(
"KaHyPar must be installed to use this method for automatic "
"cut placement. Try pip install kahypar or visit "
"https://kahypar.org/ for installation instructions."
) from e
adjacent_nodes, edge_splits, edge_weights = _graph_to_hmetis(
graph=graph, hyperwire_weight=hyperwire_weight, edge_weights=edge_weights
)
trial = 0 if trial is None else trial
ne = len(edge_splits) - 1
nv = max(adjacent_nodes) + 1
if edge_weights is not None or node_weights is not None:
edge_weights = edge_weights or [1] * ne
node_weights = node_weights or [1] * nv
hypergraph = kahypar.Hypergraph(
nv,
ne,
edge_splits,
adjacent_nodes,
num_fragments,
edge_weights,
node_weights,
)
else:
hypergraph = kahypar.Hypergraph(nv, ne, edge_splits, adjacent_nodes, num_fragments)
context = kahypar.Context()
config_path = config_path or str(Path(__file__).parent / "_cut_kKaHyPar_sea20.ini")
context.loadINIconfiguration(config_path)
context.setK(num_fragments)
if isinstance(imbalance, float):
context.setEpsilon(imbalance)
if isinstance(fragment_weights, SequenceType) and (len(fragment_weights) == num_fragments):
context.setCustomTargetBlockWeights(fragment_weights)
if not verbose:
context.suppressOutput(True)
# KaHyPar fixes seed to 42 by default, need to manually sample seed to randomize:
kahypar_seed = np.random.default_rng(seed).choice(2**15)
context.setSeed(kahypar_seed)
kahypar.partition(hypergraph, context)
cut_edge_mask = [hypergraph.connectivity(e) > 1 for e in hypergraph.edges()]
    # compress() ignores the extra hyperwires at the end if there are any.
cut_edges = list(compress(graph.edges, cut_edge_mask))
if verbose:
fragment_sizes = [hypergraph.blockSize(p) for p in range(num_fragments)]
print(len(fragment_sizes), fragment_sizes)
return cut_edges
def place_wire_cuts(
graph: MultiDiGraph, cut_edges: Sequence[Tuple[Operation, Operation, Any]]
) -> MultiDiGraph:
"""Inserts a :class:`~.WireCut` node for each provided cut edge into a circuit graph.
Args:
graph (nx.MultiDiGraph): The original (tape-converted) graph to be cut.
cut_edges (Sequence[Tuple[Operation, Operation, Any]]): List of ``MultiDiGraph`` edges
to be replaced with a :class:`~.WireCut` node. Each 3-tuple represents the source node, the
target node, and the wire key of the (multi)edge.
Returns:
MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.expval(qml.PauliZ(wires=[0]))
>>> print(tape.draw())
0: ──RX(0.432)──╭C──┤ ⟨Z⟩
a: ──RY(0.543)──╰X──┤
If we know we want to place a :class:`~.WireCut` node between nodes ``RY(0.543, wires=["a"])`` and
``CNOT(wires=[0, 'a'])`` after the tape is constructed, we can first find the edge in the graph:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> op0, op1 = tape.operations[1], tape.operations[2]
>>> cut_edges = [e for e in graph.edges if e[0] is op0 and e[1] is op1]
>>> cut_edges
[(RY(0.543, wires=['a']), CNOT(wires=[0, 'a']), 0)]
Then feed it to this function for placement:
>>> cut_graph = qml.transforms.qcut.place_wire_cuts(graph=graph, cut_edges=cut_edges)
>>> cut_graph
<networkx.classes.multidigraph.MultiDiGraph at 0x7f7251ac1220>
And visualize the cut by converting back to a tape:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.432)──────╭C──┤ ⟨Z⟩
a: ──RY(0.543)──//──╰X──┤
"""
cut_graph = graph.copy()
for op0, op1, wire_key in cut_edges:
# Get info:
order = cut_graph.nodes[op0]["order"] + 1
wire = cut_graph.edges[(op0, op1, wire_key)]["wire"]
# Apply cut:
cut_graph.remove_edge(op0, op1, wire_key)
# Increment order for all subsequent gates:
for op, o in cut_graph.nodes(data="order"):
if o >= order:
cut_graph.nodes[op]["order"] += 1
# Add WireCut
wire_cut = WireCut(wires=wire)
cut_graph.add_node(wire_cut, order=order)
cut_graph.add_edge(op0, wire_cut, wire=wire)
cut_graph.add_edge(wire_cut, op1, wire=wire)
return cut_graph
def _remove_existing_cuts(graph: MultiDiGraph) -> MultiDiGraph:
"""Removes all existing, manually or automatically placed, cuts from a circuit graph, be it
``WireCut``s or ``MeasureNode``-``PrepareNode`` pairs.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
Returns:
(MultiDiGraph): Copy of the input graph with all its existing cuts removed.
"""
uncut_graph = graph.copy()
for op in list(graph.nodes):
if isinstance(op, WireCut):
uncut_graph.remove_node(op)
elif isinstance(op, MeasureNode):
for op1 in graph.neighbors(op):
if isinstance(op1, PrepareNode):
uncut_graph.remove_node(op)
uncut_graph.remove_node(op1)
if len([n for n in uncut_graph.nodes if isinstance(n, (MeasureNode, PrepareNode))]) > 0:
warnings.warn(
"The circuit contains `MeasureNode` or `PrepareNode` operations that are "
"not paired up correctly. Please check.",
UserWarning,
)
return uncut_graph
def find_and_place_cuts(
graph: MultiDiGraph,
cut_method: Callable = kahypar_cut,
cut_strategy: CutStrategy = None,
replace_wire_cuts=False,
local_measurement=False,
**kwargs,
) -> MultiDiGraph:
"""Automatically finds and places optimal :class:`~.WireCut` nodes into a given tape-converted graph
using a customizable graph partitioning function. Preserves existing placed cuts.
Args:
graph (MultiDiGraph): The original (tape-converted) graph to be cut.
cut_method (Callable): A graph partitioning function that takes an input graph and returns
a list of edges to be cut based on a given set of constraints and objective. Defaults
to :func:`kahypar_cut` which requires KaHyPar to be installed using
``pip install kahypar`` for Linux and Mac users or visiting the
instructions `here <https://kahypar.org>`__ to compile from
source for Windows users.
cut_strategy (CutStrategy): Strategy for optimizing cutting parameters based on device
constraints. Defaults to ``None`` in which case ``kwargs`` must be fully specified
for passing to the ``cut_method``.
replace_wire_cuts (bool): Whether to replace :class:`~.WireCut` nodes with
:class:`~.MeasureNode` and :class:`~.PrepareNode` pairs. Defaults to ``False``.
local_measurement (bool): Whether to use the local-measurement circuit-cutting objective,
i.e. the maximum node-degree of the communication graph, for cut evaluation. Defaults
to ``False`` which assumes global measurement and uses the total number of cuts as the
cutting objective.
kwargs: Additional keyword arguments to be passed to the callable ``cut_method``.
Returns:
nx.MultiDiGraph: Copy of the input graph with :class:`~.WireCut` nodes inserted.
**Example**
Consider the following 4-wire circuit with a single CNOT gate connecting the top (wires
``[0, 1]``) and bottom (wires ``["a", "b"]``) halves of the circuit. Note there's a
:class:`~.WireCut` manually placed into the circuit already.
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.1, wires=0)
qml.RY(0.2, wires=1)
qml.RX(0.3, wires="a")
qml.RY(0.4, wires="b")
qml.CNOT(wires=[0, 1])
qml.WireCut(wires=1)
qml.CNOT(wires=["a", "b"])
qml.CNOT(wires=[1, "a"])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=["a", "b"])
qml.RX(0.5, wires="a")
qml.RY(0.6, wires="b")
qml.expval(qml.PauliX(wires=[0]) @ qml.PauliY(wires=["a"]) @ qml.PauliZ(wires=["b"]))
>>> print(tape.draw())
0: ──RX(0.1)──╭C──────────╭C───────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C──╰X───────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C──RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X──RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
Since the existing :class:`~.WireCut` doesn't sufficiently fragment the circuit, we can find the
remaining cuts using the default KaHyPar partitioner:
>>> graph = qml.transforms.qcut.tape_to_graph(tape)
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
)
Visualizing the newly-placed cut:
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX(0.1)──╭C───────────────╭C────────╭┤ ⟨X ⊗ Y ⊗ Z⟩
1: ──RY(0.2)──╰X──//──╭C───//──╰X────────│┤
a: ──RX(0.3)──╭C──────╰X──╭C────RX(0.5)──├┤ ⟨X ⊗ Y ⊗ Z⟩
b: ──RY(0.4)──╰X──────────╰X────RY(0.6)──╰┤ ⟨X ⊗ Y ⊗ Z⟩
We can then proceed with the usual process of replacing :class:`~.WireCut` nodes with
pairs of :class:`~.MeasureNode` and :class:`~.PrepareNode`, and then break the graph
into fragments. Or, alternatively, we can directly get such processed graph by passing
``replace_wire_cuts=True``:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
num_fragments=2,
imbalance=0.5,
replace_wire_cuts=True,
)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX(0.1)──────╭C───────────────╭C──┤ ⟨X⟩
1: ──RY(0.2)──────╰X──MeasureNode──│───┤
2: ──PrepareNode───────────────────╰X──┤
a: ──RX(0.3)──────╭C──╭X──╭C────────────RX(0.5)──╭┤ ⟨Y ⊗ Z⟩
b: ──RY(0.4)──────╰X──│───╰X────────────RY(0.6)──╰┤ ⟨Y ⊗ Z⟩
1: ──PrepareNode──────╰C───MeasureNode────────────┤
Alternatively, if all we want to do is to find the optimal way to fit a circuit onto a smaller
device, a :class:`~.CutStrategy` can be used to populate the necessary explorations of cutting
parameters. As an extreme example, if the only device at our disposal is a 2-qubit device, a
    simple cut strategy is to specify the ``max_free_wires`` argument (or, equivalently, to
    directly pass a :class:`~.Device` to the ``devices`` argument):
>>> cut_strategy = qml.transforms.qcut.CutStrategy(max_free_wires=2)
>>> print(cut_strategy.get_cut_kwargs(graph))
[{'num_fragments': 2, 'imbalance': 0.5714285714285714},
{'num_fragments': 3, 'imbalance': 1.4},
{'num_fragments': 4, 'imbalance': 1.75},
{'num_fragments': 5, 'imbalance': 2.3333333333333335},
{'num_fragments': 6, 'imbalance': 2.0},
{'num_fragments': 7, 'imbalance': 3.0},
{'num_fragments': 8, 'imbalance': 2.5},
{'num_fragments': 9, 'imbalance': 2.0},
{'num_fragments': 10, 'imbalance': 1.5},
{'num_fragments': 11, 'imbalance': 1.0},
{'num_fragments': 12, 'imbalance': 0.5},
{'num_fragments': 13, 'imbalance': 0.05},
{'num_fragments': 14, 'imbalance': 0.1}]
The printed list above shows all the possible cutting configurations one can attempt to perform
in order to search for the optimal cut. This is done by directly passing a
:class:`~.CutStrategy` to :func:`~.find_and_place_cuts`:
>>> cut_graph = qml.transforms.qcut.find_and_place_cuts(
graph=graph,
cut_strategy=cut_strategy,
)
>>> print(qml.transforms.qcut.graph_to_tape(cut_graph).draw())
0: ──RX──//─╭C──//────────╭C──//─────────┤ ╭<X@Y@Z>
1: ──RY──//─╰X──//─╭C──//─╰X─────────────┤ │
a: ──RX──//─╭C──//─╰X──//─╭C──//──RX──//─┤ ├<X@Y@Z>
b: ──RY──//─╰X──//────────╰X──//──RY─────┤ ╰<X@Y@Z>
    As one can tell, quite a few cuts have to be made in order to execute the circuit solely on
    2-qubit devices. To verify, let's print the fragments:
>>> qml.transforms.qcut.replace_wire_cut_nodes(cut_graph)
>>> frags, comm_graph = qml.transforms.qcut.fragment_graph(cut_graph)
>>> for t in frags:
... print(qml.transforms.qcut.graph_to_tape(t).draw())
.. code-block::
0: ──RX──MeasureNode─┤
1: ──RY──MeasureNode─┤
a: ──RX──MeasureNode─┤
b: ──RY──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──MeasureNode─┤
a: ──PrepareNode─╭C──MeasureNode─┤
b: ──PrepareNode─╰X──MeasureNode─┤
1: ──PrepareNode─╭C──MeasureNode─┤
a: ──PrepareNode─╰X──MeasureNode─┤
0: ──PrepareNode─╭C──MeasureNode─┤
1: ──PrepareNode─╰X──────────────┤
b: ──PrepareNode─╭X──MeasureNode─┤
a: ──PrepareNode─╰C──MeasureNode─┤
a: ──PrepareNode──RX──MeasureNode─┤
b: ──PrepareNode──RY─┤ <Z>
0: ──PrepareNode─┤ <X>
a: ──PrepareNode─┤ <Y>
"""
cut_graph = _remove_existing_cuts(graph)
if isinstance(cut_strategy, CutStrategy):
cut_kwargs_probed = cut_strategy.get_cut_kwargs(cut_graph)
# Need to reseed if a seed is passed:
seed = kwargs.pop("seed", None)
seeds = np.random.default_rng(seed).choice(2**15, cut_strategy.trials_per_probe).tolist()
cut_edges_probed = {
(cut_kwargs["num_fragments"], trial_id): cut_method(
cut_graph,
**{
**cut_kwargs,
**kwargs,
"seed": seed,
}, # kwargs has higher precedence for colliding keys
)
for cut_kwargs in cut_kwargs_probed
for trial_id, seed in zip(range(cut_strategy.trials_per_probe), seeds)
}
valid_cut_edges = {}
for (num_partitions, _), cut_edges in cut_edges_probed.items():
            # The easiest way to tell if a cut is valid is to just build the fragment graph.
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
num_cuts = sum(isinstance(n, WireCut) for n in cut_graph.nodes)
replace_wire_cut_nodes(cut_graph)
frags, comm = fragment_graph(cut_graph)
max_frag_degree = max(dict(comm.degree()).values())
if _is_valid_cut(
fragments=frags,
num_cuts=num_cuts,
max_frag_degree=max_frag_degree,
num_fragments_requested=num_partitions,
cut_candidates=valid_cut_edges,
max_free_wires=cut_strategy.max_free_wires,
):
key = (len(frags), max_frag_degree)
valid_cut_edges[key] = cut_edges
if len(valid_cut_edges) < 1:
raise ValueError(
"Unable to find a circuit cutting that satisfies all constraints. "
"Are the constraints too strict?"
)
cut_edges = _get_optim_cut(valid_cut_edges, local_measurement=local_measurement)
else:
cut_edges = cut_method(cut_graph, **kwargs)
cut_graph = place_wire_cuts(graph=graph, cut_edges=cut_edges)
if replace_wire_cuts:
replace_wire_cut_nodes(cut_graph)
return cut_graph
def _is_valid_cut(
fragments,
num_cuts,
max_frag_degree,
num_fragments_requested,
cut_candidates,
max_free_wires,
):
"""Helper function for determining if a cut is a valid canditate."""
# pylint: disable=too-many-arguments
k = len(fragments)
key = (k, max_frag_degree)
correct_num_fragments = k <= num_fragments_requested
best_candidate_yet = (key not in cut_candidates) or (len(cut_candidates[key]) > num_cuts)
# pylint: disable=no-member
all_fragments_fit = all(
len(graph_to_tape(f).wires) <= max_free_wires for j, f in enumerate(fragments)
)
return correct_num_fragments and best_candidate_yet and all_fragments_fit
def _get_optim_cut(valid_cut_edges, local_measurement=False):
"""Picks out the best cut from a dict of valid candidate cuts."""
if local_measurement:
min_max_node_degree = min(max_node_degree for _, max_node_degree in valid_cut_edges)
optim_cuts = {
k: cut_edges
for (k, max_node_degree), cut_edges in valid_cut_edges.items()
if (max_node_degree == min_max_node_degree)
}
else:
min_cuts = min(len(cut_edges) for cut_edges in valid_cut_edges.values())
optim_cuts = {
k: cut_edges
for (k, _), cut_edges in valid_cut_edges.items()
if (len(cut_edges) == min_cuts)
}
return optim_cuts[min(optim_cuts)] # choose the lowest num_fragments among best ones.
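# Illustrative sketch with hypothetical candidates: given
# valid_cut_edges = {(2, 2): [e1, e2, e3], (3, 2): [e4, e5]}, the default
# (global-measurement) objective picks [e4, e5] since it needs only two cuts;
# with local_measurement=True both share the minimal fragment degree of 2, and
# min(optim_cuts) breaks the tie in favour of the 2-fragment candidate [e1, e2, e3].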
| 38.741009 | 117 | 0.629905 | 18,050 | 0.143137 | 0 | 0 | 51,302 | 0.406826 | 0 | 0 | 82,413 | 0.653537 |
735799fe024faf41da595642a3d8bdb3ba238a42 | 1,693 | py | Python | tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | Passer-D/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
]
| 1,210 | 2020-08-18T07:57:36.000Z | 2022-03-31T15:06:05.000Z | tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
]
| 37 | 2020-08-24T02:48:38.000Z | 2022-01-30T06:41:52.000Z | tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
]
| 275 | 2020-08-18T08:35:16.000Z | 2022-03-31T15:06:07.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QProgressDialog
class ProgressBarDialog(QWidget):
def __init__(self, title='', label='', minValue=0, maxValue=100, parent=None):
super(ProgressBarDialog, self).__init__(parent)
self.process_bar = QProgressDialog(self)
self.set_bar_window_title(title)
self.set_label_text(label)
self.set_min_value(minValue)
self.set_max_value(maxValue)
self.process_bar.setWindowModality(Qt.WindowModal)
self.setGeometry(800, 300, 580, 570)
self.process_bar.canceled.connect(self.close_bar)
def set_bar_window_title(self, text):
self.process_bar.setWindowTitle(text)
self.setWindowTitle(text)
def set_label_text(self, text):
self.process_bar.setLabelText(text)
def set_min_value(self, minValue):
self.process_bar.setMinimum(minValue)
def set_max_value(self, maxvalue):
self.process_bar.setMaximum(maxvalue)
def set_value(self, value):
self.process_bar.setValue(value)
def close_bar(self):
self.process_bar.close()
def reset_bar(self):
self.process_bar = None
def show(self):
self.process_bar.show()
def is_valid(self):
return bool(self.process_bar)
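# A minimal usage sketch (the application setup below is an assumption for
# illustration and not part of this module):
#
#     import sys
#     from PyQt5.QtWidgets import QApplication
#
#     app = QApplication(sys.argv)
#     bar = ProgressBarDialog(title='Export', label='Exporting...', minValue=0, maxValue=100)
#     bar.show()
#     for i in range(101):
#         bar.set_value(i)  # the dialog closes itself when the user cancels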
| 31.351852 | 111 | 0.705848 | 1,216 | 0.718252 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.232723 |
7358c21d44c9b2e4044c283c45da55bafa2452ee | 2,469 | py | Python | 9/main.py | misterwilliam/advent-of-code | b8ddcbc5efcf62c7e5e4244339029783ac9f90b6 | [
"MIT"
]
| null | null | null | 9/main.py | misterwilliam/advent-of-code | b8ddcbc5efcf62c7e5e4244339029783ac9f90b6 | [
"MIT"
]
| null | null | null | 9/main.py | misterwilliam/advent-of-code | b8ddcbc5efcf62c7e5e4244339029783ac9f90b6 | [
"MIT"
]
| null | null | null | import itertools
import unittest
data = """Faerun to Norrath = 129
Faerun to Tristram = 58
Faerun to AlphaCentauri = 13
Faerun to Arbre = 24
Faerun to Snowdin = 60
Faerun to Tambi = 71
Faerun to Straylight = 67
Norrath to Tristram = 142
Norrath to AlphaCentauri = 15
Norrath to Arbre = 135
Norrath to Snowdin = 75
Norrath to Tambi = 82
Norrath to Straylight = 54
Tristram to AlphaCentauri = 118
Tristram to Arbre = 122
Tristram to Snowdin = 103
Tristram to Tambi = 49
Tristram to Straylight = 97
AlphaCentauri to Arbre = 116
AlphaCentauri to Snowdin = 12
AlphaCentauri to Tambi = 18
AlphaCentauri to Straylight = 91
Arbre to Snowdin = 129
Arbre to Tambi = 53
Arbre to Straylight = 40
Snowdin to Tambi = 15
Snowdin to Straylight = 99
Tambi to Straylight = 70"""
def GenPaths(cities):
for path in _GenPathsRec([], list(cities)):
yield path
def _GenPathsRec(stack, cities):
if len(cities) == 0:
yield stack
else:
for i in xrange(len(cities)):
for path in _GenPathsRec(stack + [cities[i]], cities[:i] + cities[i+1:]):
yield path
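# Example: GenPaths("ab") yields ['a', 'b'] and then ['b', 'a'] -- every
# permutation of the input cities, matching itertools.permutations in the
# test below.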
def CalcDistance(start, dest, distancePairs):
return distancePairs[frozenset((start, dest))]
def CalcPathLength(path, distance_pairs):
length = 0
for i in xrange(len(path) - 1):
length += CalcDistance(path[i], path[i+1], distance_pairs)
return length
def LoadData(data):
distance_pairs = {}
cities = set()
for line in data.split("\n"):
start, _, dest, _, distance = line.split()
cities.add(start)
cities.add(dest)
distance_pairs[frozenset([start, dest])] = int(distance)
return cities, distance_pairs
# ANSWER --------------------------------
cities, distance_pairs = LoadData(data)
longestLength = -1
for path in GenPaths(cities):
length = CalcPathLength(path, distance_pairs)
longestLength = max(longestLength, length)
print longestLength
# TESTS ---------------------------------
class GenPathsTests(unittest.TestCase):
def test_GenPaths(self):
self.assertEqual(
[path for path in GenPaths("abcd")],
[list(permutation) for permutation in itertools.permutations("abcd")])
class CalcPathLengthTests(unittest.TestCase):
def test_CalcPathLength(self):
distance_pairs = {
frozenset(["a", "b"]): 10,
frozenset(["b", "c"]): 20
}
self.assertEqual(CalcPathLength(["a", "b", "c"], distance_pairs), 30)
if __name__ == "__main__":
unittest.main() | 26.548387 | 85 | 0.665857 | 464 | 0.18793 | 320 | 0.129607 | 0 | 0 | 0 | 0 | 849 | 0.343864 |
73592c3ecd42d5c4a472b3d8242eb4b399af73f6 | 1,000 | py | Python | 100-Exercicios/ex039.py | thedennerdev/ExerciciosPython-Iniciante | de36c4a09700353a9a1daa7f1320e416c6201a5c | [
"MIT"
]
| null | null | null | 100-Exercicios/ex039.py | thedennerdev/ExerciciosPython-Iniciante | de36c4a09700353a9a1daa7f1320e416c6201a5c | [
"MIT"
]
| null | null | null | 100-Exercicios/ex039.py | thedennerdev/ExerciciosPython-Iniciante | de36c4a09700353a9a1daa7f1320e416c6201a5c | [
"MIT"
]
| null | null | null | # Python Exercise 39: Write a program that reads a young person's year of birth and, according to their age, reports whether they still have to enlist for military service, whether this is exactly the year to enlist, or whether the enlistment deadline has already passed. The program must also show how many years are left or how many years have passed since the deadline.
import datetime
current_year = datetime.datetime.today().year
ano_nasc = int(input('Enter your year of birth: '))
idade_alistamento = current_year - ano_nasc
if idade_alistamento < 18:
    print('It is not time to enlist yet')
    print(f'You are only {idade_alistamento} years old; {18 - idade_alistamento} more years to go. Wait a little longer!')
elif idade_alistamento == 18:
    print(f'You are already {idade_alistamento} years old')
    print('You are at the right age to enlist. Do not waste any time!')
else:
    print('You have already passed the enlistment deadline.')
    print(f'You are {idade_alistamento} years old; the deadline passed {idade_alistamento - 18} years ago. Regularize your situation!') | 62.5 | 315 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.738235 |
735938898c03a603b4b3dd0bb3da69ebc37d8938 | 10,903 | py | Python | fish_dashboard/scrapyd/scrapyd_service.py | SylvanasSun/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
]
| 60 | 2018-03-09T07:06:10.000Z | 2021-11-18T15:53:04.000Z | fish_dashboard/scrapyd/scrapyd_service.py | qiubaiying/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
]
| 1 | 2018-04-03T11:05:54.000Z | 2018-04-03T20:06:41.000Z | fish_dashboard/scrapyd/scrapyd_service.py | qiubaiying/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
]
| 8 | 2018-03-12T03:07:00.000Z | 2021-06-11T05:16:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from fish_core.utils.common_utils import format_dict_to_str, get_current_date, list_to_str, str_to_list
from fish_dashboard.scrapyd.model import ScrapydStatusVO, JobListDO, JobStatus, JobPriority, ProjectListVO, SpiderListVO
from fish_dashboard.scrapyd.scrapyd_db import SqlLite3Agent
class ScrapydTimeoutException(Exception):
pass
class ScrapydJobExtInfoSQLSet():
TABLE_NAME = 'scrapyd_job_ext_info'
DB_FILE_NAME = 'scrapyd.db'
CREATE_TABLE = """CREATE TABLE %s (job_id VARCHAR(32) PRIMARY KEY,
args VARCHAR(20), priority INT(1),
creation_time DATE, logs_name VARCHAR(128), logs_url VARCHAR(255),
project_name VARCHAR(32), project_version VARCHAR(20))""" % TABLE_NAME
INSERT = 'INSERT INTO %s VALUES(?,?,?,?,?,?,?,?)' % TABLE_NAME
SELECT_BY_ID = 'SELECT * FROM %s WHERE job_id = ?' % TABLE_NAME
SELECT_ALL = 'SELECT * FROM %s' % TABLE_NAME
DELETE_BY_ID = 'DELETE FROM %s WHERE job_id = ?' % TABLE_NAME
DELETE_BY_PROJECT_NAME = 'DELETE FROM %s WHERE project_name = ?' % TABLE_NAME
DELETE_BY_PROJECT_VERSION = 'DELETE FROM %s WHERE project_name = ? AND project_version = ?' % TABLE_NAME
def open_sqllite(sql_set):
agent = SqlLite3Agent(sql_set.DB_FILE_NAME)
agent.create_table(sql_set.CREATE_TABLE)
return agent
sqllite_agent = open_sqllite(ScrapydJobExtInfoSQLSet)
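# Schedule a spider run through the scrapyd agent, then persist the extra details the
# scrapyd API cannot return later (args, priority, creation date, log locations).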
def schedule_job(agent,
project_name,
spider_name,
priority=JobPriority.LOW,
setting=None,
job_id=None,
version=None,
args={}
):
jobid = agent.schedule(project_name, spider_name, priority, setting, job_id, version, args)['jobid']
if version is None: version = agent.get_version_list(project_name)['versions'][-1:]
    # Save additional information that can't be queried through the scrapyd API into the database
args_str = format_dict_to_str(args, '=')
current_date = get_current_date()
logs_name, logs_url = agent.get_logs(project_name, spider_name)
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.INSERT,
(jobid, args_str, priority, current_date, list_to_str(logs_name), list_to_str(logs_url),
project_name, version,))
def cancel_job(agent, project_name, job_id):
"""
cancel a job.
If the job is pending, it will be removed. If the job is running, it will be terminated.
"""
prevstate = agent.cancel(project_name, job_id)['prevstate']
if prevstate == 'pending':
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,))
def packing_job_ext_info(job_lsit_DO):
"""
    Pack additional information about the job into the given JobListDO instance.
"""
ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_lsit_DO.job_id,))
if ext_info is None or len(ext_info) <= 0: return
ext_info = ext_info[0]
job_lsit_DO.args = ext_info[1]
job_lsit_DO.priority = ext_info[2]
job_lsit_DO.creation_time = ext_info[3]
job_lsit_DO.logs_name = str_to_list(ext_info[4], ',')
job_lsit_DO.logs_url = str_to_list(ext_info[5], ',')
def get_scrapyd_status(agent):
    # Count the registered projects and their spiders
project_list = agent.get_project_list()
if project_list['status'] == 'error':
raise ScrapydTimeoutException
project_list = project_list['projects']
spider_list = []
for p in project_list:
s = agent.get_spider_list(project_name=p)
spider_list.extend(s['spiders'])
# get load status of a scrapyd service
load_status_dict = agent.get_load_status()
running = load_status_dict['running']
pending = load_status_dict['pending']
finished = load_status_dict['finished']
scrapydStatusVO = ScrapydStatusVO(running=running,
pending=pending,
finished=finished,
project_amount=len(project_list),
spider_amount=len(spider_list),
job_amount=running + pending + finished
)
return scrapydStatusVO
def add_version(agent, project_name, version, egg):
return agent.add_version(project_name, version, egg)['status']
def delete_project(agent, project_name):
status = agent.delete_project(project_name)['status']
if status == 'ok':
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_PROJECT_NAME, (project_name,))
def delete_project_version(agent, project_name, version):
status = agent.delete_project_version(project_name, version)['status']
if status == 'ok':
sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_PROJECT_VERSION, (project_name, version,))
def get_all_job_list(agent):
"""
    Get the job list of every project name, then
    return three job lists split by status (pending, running, finished).
"""
project_list = agent.get_project_list()
if project_list['status'] == 'error':
raise ScrapydTimeoutException
project_list = project_list['projects']
pending_job_list = []
running_job_list = []
finished_job_list = []
for project_name in project_list:
job_list = agent.get_job_list(project_name)
# Extract latest version
project_version = agent.get_version_list(project_name)['versions'][-1:]
for pending_job in job_list['pending']:
pending_job_list.append(JobListDO(project_name=project_name,
project_version=project_version,
job_id=pending_job['id'],
spider_name=pending_job['spider'],
job_status=JobStatus.PENDING
))
for running_job in job_list['running']:
running_job_list.append(JobListDO(project_name=project_name,
project_version=project_version,
job_id=running_job['id'],
spider_name=running_job['spider'],
start_time=running_job['start_time'],
job_status=JobStatus.RUNNING
))
for finished_job in job_list['finished']:
finished_job_list.append(JobListDO(project_name=project_name,
project_version=project_version,
job_id=finished_job['id'],
spider_name=finished_job['spider'],
start_time=finished_job['start_time'],
end_time=finished_job['end_time'],
job_status=JobStatus.FINISHED
))
return pending_job_list, running_job_list, finished_job_list
def get_all_project_list(agent):
project_name_list = agent.get_project_list()
if project_name_list['status'] == 'error':
raise ScrapydTimeoutException
project_name_list = project_name_list['projects']
project_list = []
for project_name in project_name_list:
version_list = agent.get_version_list(project_name)['versions']
spider_list = agent.get_spider_list(project_name)['spiders']
job_amounts = get_job_amounts(agent, project_name=project_name)
project_list.append(ProjectListVO(project_name=project_name,
project_versions=version_list,
latest_project_version=version_list[-1:],
spider_amount=len(spider_list),
spider_names=spider_list,
pending_job_amount=job_amounts['pending'],
running_job_amount=job_amounts['running'],
finished_job_amount=job_amounts['finished']
))
return project_list
def get_all_spider_list(agent):
project_name_list = agent.get_project_list()
if project_name_list['status'] == 'error':
raise ScrapydTimeoutException
project_name_list = project_name_list['projects']
spider_list = []
for project_name in project_name_list:
spider_name_list = agent.get_spider_list(project_name)['spiders']
latest_project_version = agent.get_version_list(project_name)['versions'][-1:]
for spider_name in spider_name_list:
logs_name, logs_url = agent.get_logs(project_name, spider_name)
job_amounts = get_job_amounts(agent, project_name, spider_name)
spider_list.append(SpiderListVO(spider_name=spider_name,
project_name=project_name,
latest_project_version=latest_project_version,
logs_name=logs_name,
logs_url=logs_url,
pending_job_amount=job_amounts['pending'],
running_job_amount=job_amounts['running'],
finished_job_amount=job_amounts['finished']
))
return spider_list
def get_job_amounts(agent, project_name, spider_name=None):
"""
    Get the pending, running and finished job counts.
"""
job_list = agent.get_job_list(project_name)
pending_job_list = job_list['pending']
running_job_list = job_list['running']
finished_job_list = job_list['finished']
job_amounts = {}
if spider_name is None:
job_amounts['pending'] = len(pending_job_list)
job_amounts['running'] = len(running_job_list)
job_amounts['finished'] = len(finished_job_list)
else:
job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name])
job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name])
job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name])
return job_amounts
def get_logs_info(agent, project_name, spider_name):
logs_name, logs_url = agent.get_logs(project_name, spider_name)
return {'logs_name': logs_name, 'logs_url': logs_url}
| 46.199153 | 120 | 0.604879 | 902 | 0.08273 | 0 | 0 | 0 | 0 | 0 | 0 | 1,761 | 0.161515 |
7359bba7e09630706e6e5d4a81fb814a396993e5 | 1,750 | py | Python | apps/shared/storage.py | bensternthal/affiliates | e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1 | [
"BSD-3-Clause"
]
| null | null | null | apps/shared/storage.py | bensternthal/affiliates | e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1 | [
"BSD-3-Clause"
]
| null | null | null | apps/shared/storage.py | bensternthal/affiliates | e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1 | [
"BSD-3-Clause"
]
| null | null | null | import os
from tempfile import mkstemp
from django.conf import settings
from django.core.files import locks
from django.core.files.move import file_move_safe
from django.core.files.storage import FileSystemStorage
from django.utils.text import get_valid_filename
class OverwritingStorage(FileSystemStorage):
"""
File storage that allows overwriting of stored files.
Modified from http://djangosnippets.org/snippets/2173/
"""
def get_available_name(self, name):
return name
def _save(self, name, content):
"""
Lifted partially from django/core/files/storage.py
"""
full_path = self.path(name)
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
os.makedirs(directory)
elif not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# Ensure that content is open
content.open()
if hasattr(content, 'temporary_file_path'):
# Content has a file that we can move.
temp_data_location = content.temporary_file_path()
file_move_safe(temp_data_location, full_path, allow_overwrite=True)
else:
# Write the content stream to a temporary file and move it.
fd, tmp_path = mkstemp()
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
os.write(fd, chunk)
locks.unlock(fd)
os.close(fd)
file_move_safe(tmp_path, full_path, allow_overwrite=True)
content.close()
if settings.FILE_UPLOAD_PERMISSIONS is not None:
os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
return name
| 32.407407 | 79 | 0.652571 | 1,484 | 0.848 | 0 | 0 | 0 | 0 | 0 | 0 | 385 | 0.22 |
735a0631d562698eec79867185c8831049a8bf3f | 3,783 | py | Python | bin/dupeFinder.py | kebman/dupe-finder-py | 3ac23da711577466043b5032a4022516f4ccef95 | [
"BSD-3-Clause"
]
| 1 | 2018-02-17T09:00:48.000Z | 2018-02-17T09:00:48.000Z | bin/dupeFinder.py | kebman/dupe-finder-py | 3ac23da711577466043b5032a4022516f4ccef95 | [
"BSD-3-Clause"
]
| null | null | null | bin/dupeFinder.py | kebman/dupe-finder-py | 3ac23da711577466043b5032a4022516f4ccef95 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python2
import os
import hashlib
import datetime
import sqlite3
from sqlite3 import Error
def sha256(fname):
"""Return sha256 hash from input file (fname).
:param fname:
:return: Sha256 hash digest in hexadecimal"""
hash_sha256 = hashlib.sha256()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(65536), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def getHRT(timestamp):
"""Get human readable time from a Python timestamp.
:param timestamp:
:return: Human readable timestamp (HRT)"""
dtval = datetime.datetime.fromtimestamp(timestamp)
return dtval.strftime('%Y-%m-%d %H:%M:%S')
def getSQLT(timestamp):
"""Make timestamp for SQLite from Python timestamp, meaning a UNIX epoch INTEGER.
:param timestamp:
:return: SQLite compatible timestamp in the form of a UNIX epoch INTEGER"""
# I know this is a very small function, but now it's clear what SQL needs
return int(timestamp)
def create_connection(db_file):
"""Create a database connection to the SQLite database specified by db_file
:param db_file: database file
:return: Connection object or None"""
try:
connection = sqlite3.connect(db_file)
return connection
except Error as e:
print(e)
return None
def check_exists(connection, path):
"""Check the file path in the SQL filepaths table.
:param connection:
:param path:
:return: path id"""
exists = '''SELECT EXISTS(SELECT 1 FROM filepaths
WHERE filepath = ?);'''
cursor = connection.cursor()
cursor.execute(exists, (path,))
return cursor.fetchone()
def get_path(connection, path):
"""Get the file path in the SQL filepaths table.
:param connection:
:param path:
:return: path id"""
select = '''SELECT id FROM filepaths
WHERE filepath = ?;'''
cursor = connection.cursor()
cursor.execute(select, (path,))
return cursor.fetchone()[0]
def store_path(connection, path):
"""Store the file path in the SQL filepaths table.
:param connection:
:param path:
:return: path id"""
insert = '''INSERT OR IGNORE INTO filepaths(filepath)
VALUES(?)'''
cursor = connection.cursor()
cursor.execute(insert, (path,))
return cursor.lastrowid
def store_file(connection, file):
"""Store the file, hash and relevant file attributes in the SQL files table.
:param connection:
:param file:
:return: Filepath ID"""
sql = '''INSERT INTO files(filename, checksum, filesize, btime, ctime, mtime, filepath_id)
VALUES(?, ?, ?, ?, ?, ?, ?)'''
cursor = connection.cursor()
cursor.execute(sql, file)
return None
# return cursor.lastrowid
def main():
path = "."
# UX (and OS X) spesific path names
# homedir = os.path.expanduser('~')
db_file = "db/pythonsqlite.db"
connection = create_connection(db_file)
with connection:
os.chdir(path)
for entry in os.walk("."):
folder = str(entry[0])
for file in entry[2]:
filepath = os.getcwd() + folder[1:] #[1:] cuts out the preceding dot
# only write if exists
exists = check_exists(connection, filepath)
if exists[0]:
filepath_id = get_path(connection, filepath)
# print('Fetched '+ str(filepath_id))
else:
filepath_id = store_path(connection, filepath)
# print('Written '+ str(filepath_id))
fullpathfile = os.getcwd() + folder[1:] + "/" + file
file = file
checksum = sha256(fullpathfile)
size = os.stat(fullpathfile).st_size
bstamp = os.stat(fullpathfile).st_birthtime
cstamp = os.stat(fullpathfile).st_ctime
mstamp = os.stat(fullpathfile).st_mtime
fileInfo = (file, checksum, size, bstamp, cstamp, mstamp, filepath_id)
store_file(connection, fileInfo)
# test print:
# print(str(getSQLT(birthstamp)) + " " + sha256(fullpathfile) + " " + fullpathfile + " " + str(size) + "b")
if __name__ == '__main__':
main()
| 29.787402 | 111 | 0.694422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,790 | 0.473169 |
735afc924941206a74f98559fb49787e7b5af8e7 | 309 | py | Python | Python/factorialIterative.py | Ricardoengithub/Factorial | 0c45201bbe1ad94bf0d090381eb662cf2a281fda | [
"MIT"
]
| null | null | null | Python/factorialIterative.py | Ricardoengithub/Factorial | 0c45201bbe1ad94bf0d090381eb662cf2a281fda | [
"MIT"
]
| null | null | null | Python/factorialIterative.py | Ricardoengithub/Factorial | 0c45201bbe1ad94bf0d090381eb662cf2a281fda | [
"MIT"
]
| null | null | null | def factorial(n):
fact = 1
for i in range(2,n+1):
fact*= i
return fact
def main():
n = int(input("Enter a number: "))
if n >= 0:
print(f"Factorial: {factorial(n)}")
else:
print(f"Choose another number")
if __name__ == "__main__":
main()
| 15.45 | 43 | 0.501618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.2589 |
735c291e6927c7998102106ab071603c6808076b | 5,919 | py | Python | code_old/sort.py | benwoo1110/A-List-of-Sorts-v2 | 2d404bda6c6ddc689e705cad6966f2a656ddac2f | [
"MIT"
]
| 6 | 2020-06-29T01:57:44.000Z | 2022-01-14T09:00:03.000Z | code_old/sort.py | benwoo1110/A-List-of-Sorts-v2 | 2d404bda6c6ddc689e705cad6966f2a656ddac2f | [
"MIT"
]
| null | null | null | code_old/sort.py | benwoo1110/A-List-of-Sorts-v2 | 2d404bda6c6ddc689e705cad6966f2a656ddac2f | [
"MIT"
]
| 1 | 2021-03-26T04:30:37.000Z | 2021-03-26T04:30:37.000Z | ######################################
# Import and initialize the libraries #
#####################################
from code.pygame_objects import *
from code.algorithm.bubblesort import bubblesort
from code.algorithm.insertionsort import insertionsort
from code.algorithm.bogosort import bogosort
from code.algorithm.mergesort import mergesort
from code.algorithm.quicksort import quicksort
from code.algorithm.radixsort import radixsort
from code.algorithm.selectionsort import selectionsort
from code.algorithm.commonFunc import commonFunc
#################
# Setup logging #
#################
filename = os.path.basename(__file__).split('.')[0]
logger = log.get_logger(filename)
logger.info('Loading up {}...'.format(filename))
sort_screen = screen(
name = 'sort',
surfaceParameters = {
'frame': coord(w=1024, h=768)
},
objectsParameters = {
'background': {
'type': 'object',
'frame': {
'image': coord(w=1024, h=768)
},
},
'sort_title': {
'type': 'title',
'frame': {
'image': coord(w=1024, h=135)
},
},
'back': {
'type': 'button',
'frame': {
'box': coord(x=71, y=41, w=112, h=61),
'image': coord(x=71, y=41, w=112, h=61)
},
'runclass': runclass(action='go_back')
},
'info': {
'type': 'button',
'frame': {
'box': coord(x=841, y=40, w=112, h=61),
'image': coord(x=841, y=40, w=112, h=61),
},
'runclass': runclass(action='info')
},
'speed': {
'type': 'text',
'frame': {
'image': coord(x=349, y=630, w=254, h=40),
'text': coord(x=349, y=630, w=254, h=40)
},
'data': text(
text = '10',
editable = False,
suffix = ' sec per move',
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'moves': {
'type': 'text',
'frame': {
'image': coord(x=436, y=677, w=112, h=40),
'text': coord(x=436, y=677, w=112, h=40)
},
'data': moves(
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'time_taken': {
'type': 'text',
'frame': {
'image': coord(x=768, y=630, w=177, h=40),
'text': coord(x=768, y=630, w=177, h=40)
},
'data': timer(
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'list_length': {
'type': 'text',
'frame': {
'image': coord(x=759, y=677, w=112, h=186),
'text': coord(x=759, y=677, w=112, h=186)
},
'data': text(
text = '100',
editable = False,
suffix = ' bars',
format = textFormat(
fontType=pg_ess.font.futura,
fontSize=28,
colour=pg_ess.colour.black
)
),
'dataAddSelf': True,
},
'sortbox': {
'type': 'object',
'frame': {
'box': coord(x=52, y=145, w=922, h=430),
'image': coord(x=52, y=145, w=922, h=430)
},
'data': sortbars(
bars=10,
),
'dataAddSelf': True,
}
}
)
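# Map each sort name shown on the title screen to the function that runs its animation.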
runSort = {
'Bubble sort': bubblesort.run,
'Insertion sort': insertionsort.run,
'Merge sort': mergesort.run,
'Quick sort': quicksort.run,
'Radix sort': radixsort.run,
'Bogo sort': bogosort.run,
'Selection sort': selectionsort.run
}
class sort:
@staticmethod
def run(screen, sortType:str, bars:int, speed:float):
# Set data from parent
sort_screen.objects.sort_title.switchState(sortType, withDisplay=False)
if sort_screen.objects.sortbox.data.bars != int(bars): sort_screen.objects.sortbox.data.bars = int(bars)
else: sort_screen.objects.sortbox.data.genBars()
sort_screen.objects.speed.data.setText(str(speed), withDisplay=False)
sort_screen.objects.list_length.data.setText(str(bars), withDisplay=False)
sort_screen.objects.moves.data.reset()
sort_screen.objects.time_taken.data.resetTimer()
# Display sort screen
sort_screen.surface.display()
# Buffer time before sort starts
action_result = commonFunc.waitAction(sort_screen, 0.5)
if action_result != None: return action_result
sort_result = runSort[sortType](sort_screen, speed)
if sort_result != None: return sort_result
while True:
# Get check for interaction with screen
action_result = sort_screen.event.action()
# No action
if action_result == None: continue
# When program is set to close
if action_result.contains('outcome','__quit__'): return '__quit__'
# Going back
if action_result.contains('outcome', 'go_back'): return '__back__'
# Load back screen
if action_result.contains('outcome', '__back__'): sort_screen.surface.display(withLoad=False) | 31.994595 | 112 | 0.487751 | 1,542 | 0.260517 | 0 | 0 | 1,525 | 0.257645 | 0 | 0 | 1,078 | 0.182125 |
735e36175591a886d021d1f42c7e0f23a0bc609d | 489 | py | Python | catkin_ws/src/tutorials/scripts/number_sub.py | vipulkumbhar/AuE893Spring19_VipulKumbhar | f741d5299b2804fd541b2bba64b8a4fba8521f33 | [
"MIT"
]
| 3 | 2020-12-04T22:00:12.000Z | 2022-02-09T15:53:14.000Z | catkin_ws/src/tutorials/scripts/number_sub.py | vipulkumbhar/AuE893Spring19_VipulKumbhar | f741d5299b2804fd541b2bba64b8a4fba8521f33 | [
"MIT"
]
| 1 | 2020-04-15T19:58:30.000Z | 2020-04-15T19:58:30.000Z | catkin_ws/src/tutorials/scripts/number_sub.py | vipulkumbhar/AuE893Spring19_VipulKumbhar | f741d5299b2804fd541b2bba64b8a4fba8521f33 | [
"MIT"
]
| 1 | 2020-05-21T21:59:21.000Z | 2020-05-21T21:59:21.000Z | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int64
counter = 0
pub = None
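# Add every value received on /number to a running total and republish it on /number_count.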
def callback_number(msg):
global counter
counter += msg.data
new_msg = Int64()
new_msg.data = counter
pub.publish(new_msg)
rospy.loginfo(counter)
if __name__ == '__main__':
rospy.init_node('number_counter')
sub = rospy.Subscriber("/number", Int64, callback_number)
pub = rospy.Publisher("/number_count", Int64, queue_size =10)
rospy.spin()
| 18.807692 | 66 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.145194 |
735e8db4e1d5d21ba03d9d6374f1111bc5cde6f4 | 806 | py | Python | setup.py | eddo888/perdy | 616473e9bde3ad58dc1ebf054fb78a7cc48c3adf | [
"MIT"
]
| null | null | null | setup.py | eddo888/perdy | 616473e9bde3ad58dc1ebf054fb78a7cc48c3adf | [
"MIT"
]
| null | null | null | setup.py | eddo888/perdy | 616473e9bde3ad58dc1ebf054fb78a7cc48c3adf | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import codecs
from os import path
from setuptools import setup
pwd = path.abspath(path.dirname(__file__))
with codecs.open(path.join(pwd, 'README.md'), 'r', encoding='utf8') as input:
long_description = input.read()
version='1.7'
setup(
name='Perdy',
version=version,
license='MIT',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/eddo888/perdy',
download_url='https://github.com/eddo888/perdy/archive/%s.tar.gz'%version,
author='David Edson',
author_email='[email protected]',
packages=[
'Perdy',
],
install_requires=[
'pytz',
'arrow',
'xmltodict',
'PyYAML',
'jsonpath',
'argcomplete',
'Baubles',
],
scripts=[
'bin/parser.py',
'bin/pyson.py',
'bin/colourize.py',
],
)
| 19.190476 | 77 | 0.682382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.384615 |
736056399bf64b21d6f7dca419596b81048da99f | 2,658 | py | Python | utils/slack_send.py | IntelliGrape/pennypincher | d0d503eb8a480bf28f308ff52834170cca5a53d7 | [
"MIT"
]
| null | null | null | utils/slack_send.py | IntelliGrape/pennypincher | d0d503eb8a480bf28f308ff52834170cca5a53d7 | [
"MIT"
]
| null | null | null | utils/slack_send.py | IntelliGrape/pennypincher | d0d503eb8a480bf28f308ff52834170cca5a53d7 | [
"MIT"
]
| null | null | null | from tabulate import tabulate
from slack.errors import SlackApiError
import sys
import logging
import slack
class Slackalert:
"""To send cost report on slack."""
def __init__(self, channel=None, slack_token=None):
self.channel = channel
self.slack_token = slack_token
logging.basicConfig(level=logging.WARNING)
self.logger = logging.getLogger()
def get_resource_list(self, resource_name, resource_info, resource_header, resource_list, resource_savings):
"""Returns all the idle resource information in a dictionary format."""
resource_list.insert(0, resource_header)
resource_info[resource_name] = {}
resource_info[resource_name]['Resources'] = resource_list
resource_info[resource_name]['Savings'] = resource_savings
return resource_info
def slack_alert(self, resource_info, account_name, total_savings):
"""Creates a txt file which contains the cost report and sends to the slack channel."""
try:
client = slack.WebClient(token=self.slack_token)
f = open("/tmp/cost_optimization_report.txt", "w+")
for res in resource_info.keys():
#Converts resource info dictionary to tabular format.
f.write('\n' + 'Resource: ' + res + '\n')
resource_table = tabulate(resource_info[res]['Resources'][1:],
headers=resource_info[res]['Resources'][0], tablefmt="grid",
disable_numparse=True)
f.write('\n' + resource_table + '\n \n' + 'Savings: $' + str(resource_info[res]['Savings']) + '\n')
f.close()
response = client.files_upload(
file='/tmp/cost_optimization_report.txt',
initial_comment='Cost Optimization Report | ' + account_name + ' | Total Savings: $' + str(total_savings),
channels=self.channel
)
print("Sending the Cost Optimization report to slack "+ self.channel)
except SlackApiError as e:
"""You will get a SlackApiError if "ok" is False."""
assert e.response["ok"] is False
assert e.response["error"]
"""str like 'invalid_auth', 'channel_not_found'."""
self.logger.error("Slack api error: {e.response['error']} | Error in slack_send.py")
sys.exit(1)
except Exception as e:
self.logger.error(
"Error on line {} in slack_send.py".format(sys.exc_info()[-1].tb_lineno) + " | Message: " +
str(e))
sys.exit(1)
| 45.050847 | 122 | 0.598947 | 2,547 | 0.958239 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.282543 |
73612698a39e054c2b652bdecf1e853efdbc6d55 | 526 | py | Python | src/importer/importer.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
]
| null | null | null | src/importer/importer.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
]
| null | null | null | src/importer/importer.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
]
| null | null | null | import logging
from abc import ABC, abstractmethod
from pony.orm import db_session, commit
log = logging.getLogger(__name__)
class Importer(ABC):
def __init__(self, TargetEntity):
self.TargetEntity = TargetEntity
@db_session
def truncate(self):
log.info('Truncating target tables...')
self.TargetEntity.select().delete(bulk=True)
commit()
log.info('...done!')
@abstractmethod
def __iter__(self):
"""iterate over items to be imported"""
return
| 21.04 | 52 | 0.653992 | 396 | 0.752852 | 0 | 0 | 284 | 0.539924 | 0 | 0 | 78 | 0.148289 |
73617c822f5af71e4276c1b4c85554260d13ae06 | 982 | py | Python | news/pybo/migrations/0006_auto_20211010_0322.py | Smashh712/nrib | 375c9625e9efa6bb9a6f466312de3c6fcd5818a4 | [
"MIT"
]
| null | null | null | news/pybo/migrations/0006_auto_20211010_0322.py | Smashh712/nrib | 375c9625e9efa6bb9a6f466312de3c6fcd5818a4 | [
"MIT"
]
| null | null | null | news/pybo/migrations/0006_auto_20211010_0322.py | Smashh712/nrib | 375c9625e9efa6bb9a6f466312de3c6fcd5818a4 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.7 on 2021-10-09 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pybo', '0005_auto_20211010_0320'),
]
operations = [
migrations.AddField(
model_name='issue',
name='agree_representor_id',
field=models.CharField(default='', max_length=20, null=True),
),
migrations.AddField(
model_name='issue',
name='disagree_representor_id',
field=models.CharField(default='', max_length=20, null=True),
),
migrations.AlterField(
model_name='issue',
name='agree_representor',
field=models.CharField(default='', max_length=20, null=True),
),
migrations.AlterField(
model_name='issue',
name='disagree_representor',
field=models.CharField(default='', max_length=20, null=True),
),
]
| 28.882353 | 73 | 0.580448 | 889 | 0.905295 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.205703 |
7361d838090b7ba746e73857fad1d1b69e7ce317 | 852 | py | Python | anno_gen/modify_filesprocessed.py | KevinQian97/diva_toolbox | de83de7f7602665c92dca943ab2a0b4c1b2fdfde | [
"Apache-2.0"
]
| null | null | null | anno_gen/modify_filesprocessed.py | KevinQian97/diva_toolbox | de83de7f7602665c92dca943ab2a0b4c1b2fdfde | [
"Apache-2.0"
]
| null | null | null | anno_gen/modify_filesprocessed.py | KevinQian97/diva_toolbox | de83de7f7602665c92dca943ab2a0b4c1b2fdfde | [
"Apache-2.0"
]
| 1 | 2021-09-29T04:10:10.000Z | 2021-09-29T04:10:10.000Z | import json
import os
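# Give every processed video file a default metadata entry: 30 fps with frames 0-9000 selected.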
def get_file_index(filesProcessed):
new_dict = {}
for f in filesProcessed:
new_dict[f]={"framerate": 30.0, "selected": {"0": 1, "9000": 0}}
return new_dict
ref = json.load(open("/home/lijun/downloads/kf1_meta/references/kf1_all.json","r"))
files = ref["filesProcessed"]
print(len(files))
output = json.load(open("/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output.json","r"))
output["filesProcessed"] = files
jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/output-mod.json"
with open(jname,'w') as j:
json.dump(output,j,indent=2,ensure_ascii=False)
file_dict = get_file_index(files)
jname = "/mnt/ssdb/kevinq/adaptive_temporal_shift_module/exp/iod_kf1_all/file-index.json"
with open(jname,'w') as j:
json.dump(file_dict,j,indent=2,ensure_ascii=False)
| 32.769231 | 107 | 0.738263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.433099 |
73622863ce396d64c3c5ebe2afec91bcbe2b4043 | 2,561 | py | Python | monotone_bipartition/search.py | mvcisback/monotone-bipartition | c92262fac14258ed25619681ebcb0f8734044d22 | [
"MIT"
]
| 1 | 2017-05-17T22:47:33.000Z | 2017-05-17T22:47:33.000Z | monotone_bipartition/search.py | mvcisback/multidim-threshold | c92262fac14258ed25619681ebcb0f8734044d22 | [
"MIT"
]
| 10 | 2019-04-01T17:05:14.000Z | 2020-05-01T17:23:18.000Z | monotone_bipartition/search.py | mvcisback/monotone-bipartition | c92262fac14258ed25619681ebcb0f8734044d22 | [
"MIT"
]
| 4 | 2017-02-03T01:30:03.000Z | 2018-04-25T22:28:23.000Z | from enum import Enum, auto
import funcy as fn
import numpy as np
from monotone_bipartition import rectangles as mdtr
from monotone_bipartition import refine
EPS = 1e-4
class SearchResultType(Enum):
TRIVIALLY_FALSE = auto()
TRIVIALLY_TRUE = auto()
NON_TRIVIAL = auto()
def diagonal_convex_comb(r):
bot, top = np.array(r.bot), np.array(r.top)
diag = top - bot
return lambda t: bot + t * diag
def binsearch(r, oracle, eps=EPS, find_lambda=False):
"""Binary search over the diagonal of the rectangle.
Returns the lower and upper approximation on the diagonal.
"""
f = diagonal_convex_comb(r)
feval = fn.compose(oracle, f)
lo, hi = 0, 1
# Early termination via bounds checks
if feval(lo):
result_type = SearchResultType.TRIVIALLY_TRUE
hi = 0
elif not feval(hi):
result_type = SearchResultType.TRIVIALLY_FALSE
else:
result_type = SearchResultType.NON_TRIVIAL
mid = lo
while hi - lo > eps:
mid = lo + (hi - lo) / 2
lo, hi = (lo, mid) if feval(mid) else (mid, hi)
if find_lambda:
if result_type == SearchResultType.TRIVIALLY_TRUE:
return result_type, -1
elif result_type == SearchResultType.TRIVIALLY_FALSE:
return result_type, 2
return result_type, (lo+hi)/2
else:
return result_type, mdtr.to_rec(zip(f(lo), f(hi)))
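# Find where the monotone boundary crosses the ray from the origin through `point`,
# rescaled so the ray ends on the unit box; returns the bracketing rectangle, or the
# diagonal parameter when percent=True.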
def line_intersect(func, point, tol, *, percent=False):
box_intersect = np.array(point) / max(point)
origin = [0]*len(point)
rec = mdtr.to_rec(zip(origin, box_intersect)) # Compute bounding rec.
return binsearch(rec, func, eps=tol, find_lambda=percent)[1]
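# Optimize one coordinate at a time in the given (index, polarity) order, pinning each
# solved coordinate in `base` before binsearching the next one.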
def lexicographic_opt(func, ordering, tol):
dim = len(ordering)
assert set(fn.pluck(0, ordering)) == set(range(dim))
tol /= dim # Need to compensate for multiple binsearches.
rec = refine.bounding_box(
domain=mdtr.unit_rec(dim),
oracle=func
)
# If polarity is True, set initial value at bounding.top.
# O.w. use bounding.bot.
base = tuple((rec.top if p else rec.bot)[i] for i, p in sorted(ordering))
res_rec = mdtr.to_rec(zip(base, base))
for idx, polarity in ordering:
oracle = func
rec = mdtr.to_rec(
(0, 1) if i == idx else (p, p) for i, p in enumerate(base)
)
result_type, res_cand = binsearch(rec, oracle, eps=tol)
if result_type == SearchResultType.NON_TRIVIAL:
res_rec = res_cand
base = res_rec.bot
return res_rec
| 28.775281 | 77 | 0.636861 | 111 | 0.043342 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.121437 |
73623a0c8d94829ad21399f5bae6f22979a769e7 | 1,562 | py | Python | api/web/apps/auth/views.py | procool/itstructure | 6aa3a43e1a759f5509f130ddf911779645dc89d0 | [
"BSD-2-Clause"
]
| null | null | null | api/web/apps/auth/views.py | procool/itstructure | 6aa3a43e1a759f5509f130ddf911779645dc89d0 | [
"BSD-2-Clause"
]
| null | null | null | api/web/apps/auth/views.py | procool/itstructure | 6aa3a43e1a759f5509f130ddf911779645dc89d0 | [
"BSD-2-Clause"
]
| null | null | null | from flask import url_for
from flaskcbv.view import View
from flaskcbv.conf import settings
from misc.mixins import HelperMixin
from misc.views import JSONView
class authView(JSONView):
def helper(self):
return """Authorizaion handler
Use "login" and "passwd" arguments by GET or POST to get session
"""
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def post(self, *args, **kwargs):
try:
username = self.get_argument_smart('username')
passwd = self.get_argument_smart('password')
except Exception as err:
self.abort_error(errno=-1, error="wrong_params", details="set arguments: 'username', 'passwd'")
r = settings._BB_CLIENT.login(username, passwd)
answ = r.as_dict
del answ["cmd"]
del answ["token"]
self.abort_error(**answ)
class sessionView(JSONView):
def helper(self):
return """Session check handler
Use "session" argument by GET or POST to check your session
"""
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def post(self, *args, **kwargs):
try:
session = self.get_argument_smart('session')
except Exception as err:
self.abort_error(errno=-1, error="wrong_params", details="set argument: 'session'")
r = settings._BB_CLIENT.session(session)
answ = r.as_dict
del answ["cmd"]
del answ["token"]
self.abort_error(**answ)
| 26.474576 | 107 | 0.608195 | 1,380 | 0.883483 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.227913 |
7363b08e9959a774b4c96272382532b62b203a94 | 2,069 | py | Python | tests/test_heart_forest.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
]
| 34 | 2020-03-06T07:53:43.000Z | 2022-03-13T06:12:29.000Z | tests/test_heart_forest.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
]
| 6 | 2021-06-08T22:43:23.000Z | 2022-03-08T13:57:33.000Z | tests/test_heart_forest.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
]
| 1 | 2020-11-30T21:20:32.000Z | 2020-11-30T21:20:32.000Z | from pykitml.testing import pktest_graph, pktest_nograph
@pktest_graph
def test_heart_forest():
import os.path
import numpy as np
import pykitml as pk
from pykitml.datasets import heartdisease
# Download the dataset
if(not os.path.exists('heartdisease.pkl')): heartdisease.get()
# Load heart data set
inputs, outputs = heartdisease.load()
outputs = pk.onehot(outputs)
# Create model
ftypes = [
'continues', 'categorical', 'categorical',
'continues', 'continues', 'categorical', 'categorical',
'continues', 'categorical', 'continues', 'categorical',
'categorical', 'categorical'
]
forest_heart_classifier = pk.RandomForest(13, 2, max_depth=8, feature_type=ftypes)
# Train
forest_heart_classifier.train(inputs, outputs)
# Save it
pk.save(forest_heart_classifier, 'forest_heart_classifier.pkl')
# Print accuracy
accuracy = forest_heart_classifier.accuracy(inputs, outputs)
print('Accuracy:', accuracy)
# Plot confusion matrix
forest_heart_classifier.confusion_matrix(inputs, outputs,
gnames=['False', 'True'])
# Assert accuracy
assert (forest_heart_classifier.accuracy(inputs, outputs)) >= 94
@pktest_nograph
def test_predict_heart_forest():
import os.path
import numpy as np
import pykitml as pk
# Predict heartdisease for a person with
# age sex cp trestbps chol fbs restecg thalach exang oldpeak slope ca thal
# 67, 1, 4, 160, 286, 0, 2, 108, 1, 1.5, 2, 3, 3
input_data = np.array([67, 1, 4, 160, 286, 0, 2, 108, 1, 1.5, 2, 3, 3], dtype=float)
# Load the model
forest_heart_classifier = pk.load('forest_heart_classifier.pkl')
# Get output
forest_heart_classifier.feed(input_data)
model_output = forest_heart_classifier.get_output()
# Print result (log of probabilities)
print(model_output)
if __name__ == '__main__':
try:
test_heart_forest.__wrapped__()
test_predict_heart_forest.__wrapped__()
except AssertionError:
pass | 29.140845 | 88 | 0.685839 | 0 | 0 | 0 | 0 | 1,844 | 0.891252 | 0 | 0 | 627 | 0.303045 |
736486ab642c356a4d5f9aa4e677a035c93276d3 | 25,682 | py | Python | pdf_audit.py | marctjones/perception | 9a9fe4e5cef6a2aa66544066d8c03e0e9c3b0528 | [
"MIT"
]
| null | null | null | pdf_audit.py | marctjones/perception | 9a9fe4e5cef6a2aa66544066d8c03e0e9c3b0528 | [
"MIT"
]
| null | null | null | pdf_audit.py | marctjones/perception | 9a9fe4e5cef6a2aa66544066d8c03e0e9c3b0528 | [
"MIT"
]
| null | null | null | from globals import Globals
import os
import subprocess
import datetime as dt
from urllib import \
request as request
# urlopen
from io import \
StringIO, BytesIO
import string
import requests
import re
import csv
import threading
import utils as utils
import time
import datetime as datetime
import multiprocessing
from report import PDFItem
from PyPDF2 import PdfFileReader
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import resolve1
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.layout import LAParams # , LTTextBox, LTTextLine
from threading import Thread, Event
stop_event = Event()
global document
class PDFAudit:
def __init__(self):
self.report_folder = ''
self.document_folder = ''
self.pdf_path = ''
self.report_name = ''
self.csv_header = []
self.gbl_report_folder = Globals.gbl_report_folder + self.report_folder
self.log = self.gbl_report_folder + 'logs\\'
self.document_t = PDFDocument
self.parser = PDFParser
self.url = ''
self.line_count = 1
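    # Worker loop: retries PDFDocument(self.parser) once a second; if the stop event is set
    # after ~120 attempts, a load-timeout row is appended to the report CSV instead.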
def load_pdf(self, PDFDocument, password):
i = 0
while threading.currentThread().is_alive():
i += 1
report_path = self.report_folder + self.report_name
print('LOADING: ' + i.__str__())
time.sleep(1)
# try:
self.document_t = PDFDocument(self.parser)
# except Exception as e:
# print('PDFDocument(self.parser) FAILED ::::: ' + e.__str__())
if stop_event.is_set():
if i >= 120:
# print(self.parser.fp.name + ' FAILED (SEC): ' + i.__str__())
print(' >>> FAIL : PDF LOAD STOP EVENT : 120 SECONDS')
row = [self.line_count, 'PDFDocument FAILED TO LOAD - 90 SEC TIMEOUT REACHED FOR: ' + self.url,
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', ]
# self.line_count += 1
# 90 SECOND TIMEOUT or FAILED TO PARSER
with open(report_path, 'a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
writer.dialect.lineterminator.replace('\n', '')
writer.writerow(row)
break
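    # Watchdog: polls the worker thread every 2 seconds and writes a timeout row to the
    # report CSV if it is still alive after 180 seconds.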
def thread_monitor(self, process_name, thread):
i = 0
while thread.is_alive():
time.sleep(2)
i += 2
print(process_name + ' WORKING FOR ' + i.__str__() + ' seconds for: ' + thread.getName())
print('ACTIVE COUNT: ' + str(threading.active_count()))
if i == 180:
print(thread.getName() + ' KILLED AT 180 SECONDS')
report_path = self.report_folder + self.report_name
row = [self.line_count, 'PDF THREAD FAILED TO PROCESS - 180 SEC TIMEOUT REACHED FOR: ' + self.url,
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ]
# self.line_count += 1
# 120 SECOND TIMEOUT
with open(report_path, 'a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
writer.dialect.lineterminator.replace('\n', '')
writer.writerow(row)
break
print(process_name + ':[COMPLETED IN ' + i.__str__() + ' seconds for: ' + thread.getName() + ']')
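    # Walk the crawler CSV of PDF URLs, skip documents already present in the report,
    # and audit each remaining URL on its own daemon thread (throttled once 35 threads are active).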
def pdf_csv(self, csv_to_audit, source_folder, scope):
# Define CSV
self.csv_header = (['csvline', 'url', 'filename', 'local_path',
'encrypted', 'decrypt_pass', 'istagged', 'pages', 'toc', 'form', 'fields', 'tables',
'word_count', 'char_count', 'words_per_page', 'chars_per_word', 'image_count',
'%_img_per_page', 'ocr_risk', 'author', 'creator', 'producer', 'subject', 'title', 'text'])
# root_path = os.path.split(source_folder)[0]
self.report_folder = os.path.split(source_folder)[0].replace('SPIDER', '')
# Set logs
self.log = os.path.join(self.report_folder, 'logs')
if not os.path.exists(self.log):
os.makedirs(self.log)
self.report_folder = os.path.join( self.report_folder, 'PDF')
if not os.path.exists(self.report_folder):
os.makedirs(self.report_folder)
# os.chdir(self.report_folder)
if csv_to_audit.find('internal') >= 0 or scope == 'internal':
self.log = os.path.join(self.log, '_pdf_internal_log.txt')
self.report_name = csv_to_audit[:-4] + '_a.csv'
if csv_to_audit.find('external') >= 0 or scope == 'external':
self.log = os.path.join(self.log, '_pdf_external_log.txt')
self.report_name = csv_to_audit[:-4] + '_a.csv'
self.document_folder = self.report_folder
if not os.path.exists(self.document_folder):
os.makedirs(self.document_folder)
try:
write_header = False
report_path = self.report_folder + self.report_name
if not os.path.exists(report_path):
write_header = True
os.chdir(self.report_folder)
with open(report_path, 'a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
if write_header:
writer.writerow(self.csv_header)
except Exception as e:
print('PDF I/O error: ' + e.__str__())
csv_source = os.path.join(source_folder, csv_to_audit)
row_count = sum(1 for row in csv.reader(open(csv_source, 'r',
encoding='utf8'), delimiter=','))
row_count_i = row_count - 2
with open(csv_source, encoding='utf8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
# set number of threads
thread_count = 1
destination_folder = self.report_name
# Get URL for PDF from row[1]
# FOR EACH PDF
first_line = True
for row in csv_reader:
pdf_url = row[0]
skip = False
if first_line:
first_line = False
print(' ::: START ALL PDF :::')
continue
elif os.path.exists(destination_folder):
with open(destination_folder, encoding='utf8') as completed_urls:
completed_urls_reader = csv.reader(completed_urls, delimiter=',')
jump = True
fl = True
skip = False
for completed_url in completed_urls_reader:
if fl:
jump = True
fl = False
continue
if pdf_url in completed_url[1]:
msg = (' >>> Remaining PDFs: ' + row_count_i.__str__() + ' out of ' +
row_count.__str__() + ' ' + (datetime.datetime.now().__str__()[:-7]))
row_count_i -= 1
# self.line_count += 1
utils.logline(self.log, msg)
print(msg)
fl = False
skip = True
break
# completed_urls.close()
try:
if skip:
skip = False
continue
self.line_count = csv_reader.line_num
self.url = pdf_url
thread = Thread(target=self.pdf_thread,
args=(pdf_url,))
thread.setDaemon(True)
while threading.active_count() > 35:
print(' !! TAKE 5 !!')
time.sleep(5)
print('RUN AUDIT FOR :: ' + pdf_url + ' ' + thread.getName())
thread.start()
i = 0
thread_monitor = Thread(target=self.thread_monitor,
args=('PDF', thread))
thread_monitor.setDaemon(True)
thread_monitor.start()
time.sleep(5)
msg = (' >>> Remaining PDFs: ' + row_count_i.__str__() + ' out of ' +
row_count.__str__() + ' ' + (datetime.datetime.now().__str__()[:-7]))
row_count_i -= 1
utils.logline(self.log, msg)
print(msg)
except Exception as e:
msg = e.__str__() + ' PDF:01' + '\n'
print(msg)
utils.logline(self.log, msg)
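    # Download a single PDF into the documents folder, seed its CSV row with the URL and
    # local path, then hand the open file to self.pdf() for the detailed audit.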
def pdf_thread(self, url):
pdf_name = ''
exit_call = ''
csv_row = []
# save PDF to disk
try:
pdf_name = BytesIO(url.split("/")[-1].encode('UTF-8')).read().__str__()[2:-1]
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
regex = re.compile(valid_chars)
pdf_name = regex.sub('', pdf_name.__str__())
self.pdf_path = self.document_folder + regex.sub('', pdf_name)
r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
with open(self.pdf_path, 'wb') as code:
code.write(r.content)
code.close()
csv_row.insert(0, [self.csv_header[0], self.line_count.__str__()])
csv_row.insert(1, [self.csv_header[1], url if url.__len__() > 0 else 'NULL'])
csv_row.insert(2, [self.csv_header[2], pdf_name if pdf_name.__len__() > 0 else 'NULL'])
csv_row.insert(3, [self.csv_header[3], self.pdf_path if self.pdf_path.__len__() > 0 else 'NULL'])
print(' >>>> PDF START:[' + url + '] ' + self.line_count.__str__() + ' ' + (
datetime.datetime.now().__str__()[:-7]))
except Exception as e:
csv_row.insert(0, [self.csv_header[0], self.line_count.__str__()])
csv_row.insert(1, [self.csv_header[1], url if url.__len__() > 0 else 'NULL'])
csv_row.insert(2, [self.csv_header[2], e.__str__()])
csv_row.insert(3, [self.csv_header[3], self.pdf_path if self.pdf_path.__len__() > 0 else 'NULL'])
print(e)
pass
my_file = os.path.join(self.document_folder + pdf_name)
try:
fp = open(my_file, 'rb')
# self.pdf(fp, csv_row)
except Exception as e:
print(' PDF LOAD FAILED !!! ' + self.line_count.__str__() + ' : ' + self.pdf_path)
csv_row.pop(3)
csv_row.insert(3, [self.csv_header[3], 'PDF FAILED TO OPEN:' + self.pdf_path if self.pdf_path.__len__() > 0 else 'NULL'])
# Write results
row = []
for i in range(csv_row.__len__()):
row.append(csv_row[i][1])
report_path = self.report_folder + self.report_name
row_append = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
index = 4
for ii in row_append:
row.insert(index, ii)
index += 1
# OPEN FAILED
with open(report_path, 'a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
writer.dialect.lineterminator.replace('\n', '')
writer.writerow(row)
return
try:
self.pdf(fp, csv_row)
except Exception as e:
print('PDF FAIL')
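    # Core audit for one document: encryption, tagging, page count, TOC, form fields,
    # a text sample with word/char metrics, an OCR-risk score and document info,
    # written out as a single row of the report CSV.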
def pdf(self, fp, csv_row):
password = ''
extracted_text = ''
self.parser = PDFParser(fp)
self.document_t = PDFDocument
pf = PdfFileReader
# isEncrypted
try:
i = 0
try:
thread = Thread(target=self.load_pdf,
args=(PDFDocument, password))
thread.start()
thread.join(timeout=90)
except Exception as e:
print('PDF I/O error: ' + e.__str__())
row = [self.line_count, 'PDF DOCUMENT OBJECT FAILED TO LOAD - ' + e.__str__() + ': ' +
self.url, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', ]
# self.line_count += 1
report_path = self.report_folder + self.report_name
# 90 SECONDS or LOAD FAIL
with open(report_path, 'a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
writer.dialect.lineterminator.replace('\n', '')
writer.writerow(row)
stop_event.set()
document = PDFDocument
document = self.document_t
pf = PdfFileReader(BytesIO(open(self.pdf_path, 'rb').read()))
# ENCRYPTION
if self.parser.doc.encryption is not None:
csv_row.insert(4, [self.csv_header[4], 'ENCRYPTED'])
csv_row.insert(5, [self.csv_header[5], 'ENCRYPTED'])
else:
csv_row.insert(4, [self.csv_header[4], 'FALSE'])
csv_row.insert(5, [self.csv_header[5], 'NA'])
except Exception as e:
csv_row.insert(4, [self.csv_header[4], 'FAILED: ' + e.__str__()])
csv_row.insert(5, [self.csv_header[5], 'NA'])
exit_call = e.__str__() + ' document failed!!'
print(exit_call)
pass
page_count = 0
# istagged
try:
pages = PDFPage.get_pages(document)
if not document.is_extractable:
raise PDFTextExtractionNotAllowed
rsrcmgr = PDFResourceManager()
laparams = LAParams()
page_no = 0
istagged = 'FALSE'
try:
# document.catalog
if document.catalog['MarkInfo']:
istagged = 'TRUE'
except Exception as e:
exit_call = e.__str__() + ' tagged info failed!!'
print(exit_call)
page_count = resolve1(document.catalog['Pages'])['Count']
csv_row.insert(6, [self.csv_header[6], istagged])
csv_row.insert(7, [self.csv_header[7], page_count])
except Exception as e:
csv_row.insert(6, [self.csv_header[6], 'IsTagged: ' + e.__str__()])
csv_row.insert(7, [self.csv_header[7], 'Page Count: ' + e.__str__()])
exit_call = e.__str__() + ' tagged info failed!!'
print(exit_call)
# TOC
try:
if pf.outlines:
csv_row.insert(8, [self.csv_header[8], 'TRUE'])
'''pdf_path_toc = self.document_folder + pdf_name + '_toc.txt'
places_list = pf.outlines
with open(pdf_path_toc, 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in places_list)
filehandle.close()'''
else:
csv_row.insert(8, [self.csv_header[8], 'FALSE'])
except Exception as e:
csv_row.insert(8, [self.csv_header[8], 'TOC FAILED: ' + e.__str__()])
exit_call = e.__str__() + ' toc info failed!!'
print(exit_call)
# isForm, fields,
try:
if pf.getFields():
csv_row.insert(9, [self.csv_header[9], 'TRUE'])
csv_row.insert(10, [self.csv_header[10], pf.getFields().__len__()])
else:
csv_row.insert(9, [self.csv_header[9], 'FALSE'])
csv_row.insert(10, [self.csv_header[10], 0])
except Exception as e:
csv_row.insert(9, [self.csv_header[9], 'FORMS: ' + e.__str__()])
csv_row.insert(10, [self.csv_header[10], 'FIELDS: ' + e.__str__()])
exit_call = e.__str__() + ' forms failed!!'
print(exit_call)
# tables
csv_row.insert(11, [self.csv_header[11], 'NOT RUN'])
write_clip = ''
word_count = 0
words_per_page = 0
char_count = 0
chars_per_word = 0
image_count = 0
# TODO: write 3 page sample and word count
try:
if pf.getNumPages() < 50:
for page in range(pf.getNumPages()):
p = pf.getPage(page)
text_clip = p.extractText().encode('UTF-8')
text_clip = BytesIO(text_clip).read().__str__()[2:]
count_clip = re.findall(r"[^\W_]+", text_clip, re.MULTILINE)
word_count += len(count_clip)
char_count += len(text_clip)
if page <= 3:
write_clip += '[ PAGE ' + (page + 1).__str__() + ' START ] '
write_clip += text_clip.replace('\n', '').replace(',', ' ').replace('"', '')
write_clip += '[ PAGE ' + (page + 1).__str__() + ' END ]'
else:
write_clip = 'OVER 50 PAGES - SAMPLE SKIPPED'
except Exception as e:
exit_call = e.__str__() + ' :: TEXT sample failed!!'
write_clip = exit_call
word_count = exit_call
char_count = exit_call
print(exit_call)
# TODO: Words/chars per page
try:
if not word_count == 0:
chars_per_word = char_count / word_count
else:
chars_per_word = 0
if not page_count == 0:
words_per_page = word_count / page_count
else:
words_per_page = 0
except Exception as e:
exit_call = e.__str__() + ' :: WORD METRICS failed!!'
chars_per_word = exit_call
words_per_page = exit_call
print(exit_call)
# TODO: Add to row
i = 12
try:
csv_row.insert(i, [self.csv_header[i], word_count.__str__()])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'WORD_COUNT: ' + e.__str__()])
i = 13
try:
csv_row.insert(i, [self.csv_header[i], char_count.__str__()])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'CHAR_COUNT: ' + e.__str__()])
i = 14
try:
csv_row.insert(i, [self.csv_header[i], words_per_page.__str__()])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'WPP: ' + e.__str__()])
i = 15
try:
csv_row.insert(i, [self.csv_header[i], chars_per_word.__str__()])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'CPP: ' + e.__str__()])
# TODO: IMAGES
i = 16
'''try:
pdfImages = Globals.base_folder + 'cli-tools\\pdfimages.exe'
img_folder = self.document_folder + 'images\\' # + pdf_name[:-4] + '\\'
if not os.path.exists(img_folder):
os.makedirs(img_folder)
# cmd = pdfImages + ' -list ' + '\"' + pdf_path + '\"'
# output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].split(b'\n')
# save images to disk
cmd = pdfImages + ' -list \"' + self.pdf_path + '\" \"' + ' ' + '\"'
# subprocess.Popen(cmd, stdout=subprocess.PIPE)
os.chdir(img_folder)
image_list = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].split(b'\r\n')
# os.remove(img_folder)
# image_count = output.count('\n')
image_count = image_list.__len__()
if image_count > 2:
# target = open(pdf_path_image, 'w')
# target.write(image_list)
# target.close()
csv_row.insert(i, [self.csv_header[i], (image_count - 2).__str__()])
elif image_count == 0:
csv_row.insert(i, [self.csv_header[i], 0])
else:
csv_row.insert(i, [self.csv_header[i], 0])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], e.__str__() + ' image info failed!!'])
exit_call = e.__str__() + ' image info failed!!'
print(exit_call)'''
# TODO: IMAGES per page
i = 17
percent_img_per_page = float
try:
            if image_count != 0 and page_count != 0:
percent_img_per_page = (float(image_count) / float(page_count)) * 100
else:
percent_img_per_page = 0
csv_row.insert(i, [self.csv_header[i], percent_img_per_page])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'IMG: ' + e.__str__()])
# TODO: OCR risk
i = 18
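        # Heuristic: few words per page combined with a high image-to-page ratio suggests a
        # scanned document that probably needs OCR (0 = low risk, 5 = high risk).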
try:
if words_per_page == 0 or percent_img_per_page > 3000:
ocr_risk = 5
elif words_per_page < 15 or percent_img_per_page > 2000:
ocr_risk = 4
elif words_per_page < 40 or percent_img_per_page > 1000:
ocr_risk = 3
elif words_per_page < 70 or percent_img_per_page > 425:
ocr_risk = 2
elif words_per_page < 80 or percent_img_per_page > 200:
ocr_risk = 1
else:
ocr_risk = 0
csv_row.insert(i, [self.csv_header[i], ocr_risk])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'OCR: ' + e.__str__()])
# author, creator, producer, subject, title,
di = pf
try:
di = pf.documentInfo
except Exception as e:
exit_call = e.__str__() + ' :: DOCUMENT INFO LOAD failed!!'
print(exit_call)
# Document info
if di:
# Author
try:
i = 19
if di.author:
csv_row.insert(i, [self.csv_header[i], di.author.encode('UTF-8')])
else:
csv_row.insert(i, [self.csv_header[i], 'NULL'])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'AUTHOR: ' + e.__str__()])
exit_call = e.__str__() + ' doc info failed!!'
print(exit_call)
# Creator
try:
i = 20
if di.creator:
csv_row.insert(i, [self.csv_header[i], di.creator.encode('UTF-8')])
else:
csv_row.insert(i, [self.csv_header[i], 'NULL'])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'CREATOR: ' + e.__str__()])
print(exit_call)
print('#5.1')
# Producer
try:
i = 21
if di.producer:
csv_row.insert(i, [self.csv_header[i], di.producer.encode('UTF-8')])
else:
csv_row.insert(i, [self.csv_header[i], 'NULL'])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'PRODUCER: ' + e.__str__()])
print(exit_call)
# Subject
try:
i = 22
if di.subject:
csv_row.insert(i, [self.csv_header[i], di.subject.encode('UTF-8')])
else:
csv_row.insert(i, [self.csv_header[i], 'NULL'])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'SUBJECT: ' + e.__str__()])
print(exit_call)
# Title
try:
i = 23
if di.title:
csv_row.insert(i, [self.csv_header[i], di.title.encode('UTF-8')])
else:
csv_row.insert(i, [self.csv_header[i], 'NULL'])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], 'TITLE: ' + e.__str__()])
print(exit_call)
# Document clip
i = 24
try:
csv_row.insert(i, [self.csv_header[i], write_clip])
except Exception as e:
csv_row.insert(i, [self.csv_header[i], e.__str__()])
# Write results
row = []
for i in range(csv_row.__len__()):
row.append(csv_row[i][1])
report_path = self.report_folder + self.report_name
# COPLETE WRITE
with open(report_path, 'a', encoding='utf8', newline='') as csv_file:
writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
writer.dialect.lineterminator.replace('\n', '')
writer.writerow(row)
# csv_file.close()
fp.close()
os.remove(self.pdf_path)
# Log close
msg = (' >>>> PDF complete:[' + self.url + '] ' + self.line_count.__str__() + ' ' +
(datetime.datetime.now().__str__()[:-7]))
print(msg)
utils.logline(self.log, msg)
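    # A standalone sketch (not called anywhere) restating the OCR-risk heuristic
    # applied above; `words_per_page` and `percent_img_per_page` are assumed to
    # be precomputed floats. Kept only to document the thresholds in one place.
    def _ocr_risk_sketch(self, words_per_page, percent_img_per_page):
        if words_per_page == 0 or percent_img_per_page > 3000:
            return 5
        elif words_per_page < 15 or percent_img_per_page > 2000:
            return 4
        elif words_per_page < 40 or percent_img_per_page > 1000:
            return 3
        elif words_per_page < 70 or percent_img_per_page > 425:
            return 2
        elif words_per_page < 80 or percent_img_per_page > 200:
            return 1
        return 0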
| 43.825939 | 133 | 0.50035 | 24,867 | 0.968266 | 0 | 0 | 0 | 0 | 0 | 0 | 4,904 | 0.190951 |
7df69847b16a72c401c8d03768fb93c74d01b5c9 | 2,114 | py | Python | morepath/tests/test_method_directive.py | DuncanBetts/morepath | acad10489b051df9c512f6735a9338854745a599 | [
"BSD-3-Clause"
]
| null | null | null | morepath/tests/test_method_directive.py | DuncanBetts/morepath | acad10489b051df9c512f6735a9338854745a599 | [
"BSD-3-Clause"
]
| null | null | null | morepath/tests/test_method_directive.py | DuncanBetts/morepath | acad10489b051df9c512f6735a9338854745a599 | [
"BSD-3-Clause"
]
| null | null | null | import morepath
from webtest import TestApp as Client
def test_implicit_function():
class app(morepath.App):
@morepath.dispatch_method()
def one(self):
return "Default one"
@morepath.dispatch_method()
def two(self):
return "Default two"
@app.path(path='')
class Model(object):
def __init__(self):
pass
@app.method(app.one)
def one_impl(self):
return self.two()
@app.method(app.two)
def two_impl(self):
return "The real two"
@app.view(model=Model)
def default(self, request):
return request.app.one()
c = Client(app())
response = c.get('/')
assert response.body == b'The real two'
def test_implicit_function_mounted():
class base(morepath.App):
@morepath.dispatch_method()
def one(self):
return "Default one"
@morepath.dispatch_method()
def two(self):
return "Default two"
class alpha(base):
pass
class beta(base):
def __init__(self, id):
self.id = id
@alpha.mount(path='mounted/{id}', app=beta)
def mount_beta(id):
return beta(id=id)
class AlphaRoot(object):
pass
class Root(object):
def __init__(self, id):
self.id = id
@alpha.path(path='/', model=AlphaRoot)
def get_alpha_root():
return AlphaRoot()
@beta.path(path='/', model=Root)
def get_root(app):
return Root(app.id)
@beta.method(base.one)
def one_impl(self):
return self.two()
@beta.method(base.two)
def two_impl(self):
return "The real two"
@alpha.view(model=AlphaRoot)
def alpha_default(self, request):
return request.app.one()
@beta.view(model=Root)
def default(self, request):
return "View for %s, message: %s" % (self.id, request.app.one())
c = Client(alpha())
response = c.get('/mounted/1')
assert response.body == b'View for 1, message: The real two'
response = c.get('/')
assert response.body == b'Default one'
| 21.793814 | 72 | 0.580889 | 702 | 0.332072 | 0 | 0 | 1,293 | 0.611637 | 0 | 0 | 211 | 0.099811 |
7df6fe1ea2b65847f447c2f9cd2b5b13e71d4aef | 14,020 | py | Python | edb/edgeql/tracer.py | hyperdrivetech/edgedb | 6d84d607889eca771e902f28c2329e388fd172b0 | [
"Apache-2.0"
]
| null | null | null | edb/edgeql/tracer.py | hyperdrivetech/edgedb | 6d84d607889eca771e902f28c2329e388fd172b0 | [
"Apache-2.0"
]
| null | null | null | edb/edgeql/tracer.py | hyperdrivetech/edgedb | 6d84d607889eca771e902f28c2329e388fd172b0 | [
"Apache-2.0"
]
| null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2015-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import functools
import typing
from edb.schema import name as sn
from edb.schema import objects as so
from edb.edgeql import ast as qlast
class Type:
def __init__(self, name):
self.name = name
def get_name(self, schema):
return self.name
class ObjectType(Type):
def __init__(self, name):
super().__init__(name)
self.pointers = {}
def is_pointer(self):
return False
def getptr(self, schema, name):
return self.pointers.get(name)
class UnionType:
def __init__(self, types):
self.types = types
class Pointer:
def __init__(self, name, *, source=None, target=None):
self.name = name
self.source = source
self.target = target
self.pointers = {}
def is_pointer(self):
return True
def getptr(self, schema, name):
return self.pointers.get(name)
def get_target(self, schema):
return self.target
def get_name(self, schema):
return self.name
def trace_refs(
qltree: qlast.Base,
*,
schema,
source: typing.Optional[sn.Name] = None,
subject: typing.Optional[sn.Name] = None,
path_prefix: typing.Optional[sn.Name] = None,
module: typing.Optional[str] = None,
objects: typing.Dict[str, object],
) -> typing.FrozenSet[sn.Name]:
"""Return a list of schema item names used in an expression."""
ctx = TracerContext(schema, module, objects,
source, subject, path_prefix)
trace(qltree, ctx=ctx)
return frozenset(ctx.refs)
class TracerContext:
def __init__(self, schema, module, objects, source, subject, path_prefix):
self.schema = schema
self.refs = set()
self.module = module
self.objects = objects
self.source = source
self.subject = subject
self.path_prefix = path_prefix
def get_ref_name(self, ref: qlast.ObjectRef) -> sn.Name:
if ref.module:
return sn.Name(module=ref.module, name=ref.name)
elif f'{self.module}::{ref.name}' in self.objects:
return sn.Name(module=self.module, name=ref.name)
else:
return sn.Name(module="std", name=ref.name)
@functools.singledispatch
def trace(node: qlast.Base, *,
ctx: TracerContext) -> typing.Optional[so.Object]:
raise NotImplementedError(f"do not know how to trace {node!r}")
@trace.register
def trace_none(node: type(None), *, ctx: TracerContext) -> None:
pass
@trace.register
def trace_Constant(node: qlast.BaseConstant, *, ctx: TracerContext) -> None:
pass
@trace.register
def trace_Array(node: qlast.Array, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el, ctx=ctx)
@trace.register
def trace_Set(node: qlast.Set, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el, ctx=ctx)
@trace.register
def trace_Tuple(node: qlast.Tuple, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el, ctx=ctx)
@trace.register
def trace_NamedTuple(node: qlast.NamedTuple, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el.val, ctx=ctx)
@trace.register
def trace_BinOp(node: qlast.BinOp, *, ctx: TracerContext) -> None:
trace(node.left, ctx=ctx)
trace(node.right, ctx=ctx)
@trace.register
def trace_UnaryOp(node: qlast.UnaryOp, *, ctx: TracerContext) -> None:
trace(node.operand, ctx=ctx)
@trace.register
def trace_Detached(node: qlast.DetachedExpr, *, ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
@trace.register
def trace_TypeCast(node: qlast.TypeCast, *, ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
if not node.type.subtypes:
ctx.refs.add(ctx.get_ref_name(node.type.maintype))
@trace.register
def trace_IsOp(node: qlast.IsOp, *, ctx: TracerContext) -> None:
trace(node.left, ctx=ctx)
if not node.right.subtypes:
ctx.refs.add(ctx.get_ref_name(node.right.maintype))
@trace.register
def trace_Introspect(node: qlast.Introspect, *, ctx: TracerContext) -> None:
if not node.type.subtypes:
ctx.refs.add(ctx.get_ref_name(node.type.maintype))
@trace.register
def trace_FunctionCall(node: qlast.FunctionCall, *,
ctx: TracerContext) -> None:
for arg in node.args:
trace(arg, ctx=ctx)
for arg in node.kwargs.values():
trace(arg, ctx=ctx)
@trace.register
def trace_Indirection(node: qlast.Indirection, *, ctx: TracerContext) -> None:
for indirection in node.indirection:
trace(indirection, ctx=ctx)
trace(node.arg, ctx=ctx)
@trace.register
def trace_Index(node: qlast.Index, *, ctx: TracerContext) -> None:
trace(node.index, ctx=ctx)
@trace.register
def trace_Slice(node: qlast.Slice, *, ctx: TracerContext) -> None:
trace(node.start, ctx=ctx)
trace(node.stop, ctx=ctx)
@trace.register
def trace_Path(node: qlast.Path, *,
ctx: TracerContext) -> typing.Optional[so.Object]:
tip = None
ptr = None
plen = len(node.steps)
for i, step in enumerate(node.steps):
if isinstance(step, qlast.ObjectRef):
refname = ctx.get_ref_name(step)
if refname in ctx.objects:
ctx.refs.add(refname)
tip = ctx.objects[refname]
else:
tip = ctx.schema.get(refname)
elif isinstance(step, qlast.Ptr):
if i == 0:
# Abbreviated path.
if ctx.path_prefix in ctx.objects:
tip = ctx.objects[ctx.path_prefix]
else:
# We can't reason about this path.
return
if step.type == 'property':
lprop = ptr.getptr(ctx.schema, step.ptr.name)
if lprop is None:
# Invalid link property reference, bail.
return
if isinstance(lprop, Pointer):
ctx.refs.add(f'{lprop.source}@{step.ptr.name}')
else:
if step.direction == '<':
if plen > i + 1 and isinstance(node.steps[i + 1],
qlast.TypeIndirection):
# A reverse link traversal with a type filter,
# process it on the next step.
pass
else:
# otherwise we cannot say anything about the target,
# so bail.
return
else:
if tip is None:
# We can't reason about this path.
return
ptr = tip.getptr(ctx.schema, step.ptr.name)
if ptr is None:
# Invalid pointer reference, bail.
return
if ptr.source == tip:
tip_name = tip.get_name(ctx.schema)
ctx.refs.add(f'{tip_name}@{step.ptr.name}')
tip = ptr.get_target(ctx.schema)
elif isinstance(step, qlast.TypeIndirection):
tip = _resolve_type_expr(step.type, ctx=ctx)
prev_step = node.steps[i - 1]
if prev_step.direction == '<':
ptr = tip.getptr(ctx.schema, prev_step.ptr.name)
if ptr is None:
# Invalid pointer reference, bail.
return
if isinstance(tip, Type):
tip_name = tip.get_name(ctx.schema)
ctx.refs.add(f'{tip_name}@{prev_step.ptr.name}')
tip = ptr.get_target(ctx.schema)
else:
tr = trace(step, ctx=ctx)
if tr is not None:
tip = tr
if isinstance(tip, Pointer):
ptr = tip
return tip
@trace.register
def trace_Source(node: qlast.Source, *, ctx: TracerContext) -> so.Object:
return ctx.objects[ctx.source]
@trace.register
def trace_Subject(node: qlast.Subject, *,
ctx: TracerContext) -> typing.Optional[so.Object]:
# Apparently for some paths (of length 1) ctx.subject may be None.
if ctx.subject is not None:
return ctx.objects[ctx.subject]
def _resolve_type_expr(
texpr: qlast.TypeExpr, *,
ctx: TracerContext
) -> typing.Union[so.Object, UnionType]:
if isinstance(texpr, qlast.TypeName):
if texpr.subtypes:
return Type(name=texpr.maintype.name)
else:
refname = ctx.get_ref_name(texpr.maintype)
obj = ctx.objects.get(refname)
if obj is None:
obj = ctx.schema.get(refname)
else:
ctx.refs.add(refname)
return obj
elif isinstance(texpr, qlast.TypeOp):
if texpr.op == '|':
return UnionType([
_resolve_type_expr(texpr.left, ctx=ctx),
_resolve_type_expr(texpr.right, ctx=ctx),
])
else:
raise NotImplementedError(
f'unsupported type operation: {texpr.op}')
else:
raise NotImplementedError(
f'unsupported type expression: {texpr!r}'
)
@trace.register
def trace_TypeIndirection(node: qlast.TypeIndirection, *,
ctx: TracerContext) -> None:
trace(node.type, ctx=ctx)
@trace.register
def trace_TypeOf(node: qlast.TypeOf, *, ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
@trace.register
def trace_TypeName(node: qlast.TypeName, *, ctx: TracerContext) -> None:
if node.subtypes:
for st in node.subtypes:
trace(st, ctx=ctx)
else:
fq_name = node.maintype.name
if node.maintype.module:
fq_name = f'{node.maintype.module}::{fq_name}'
ctx.refs.add(fq_name)
@trace.register
def trace_TypeOp(node: qlast.TypeOp, *, ctx: TracerContext) -> None:
trace(node.left, ctx=ctx)
trace(node.right, ctx=ctx)
@trace.register
def trace_IfElse(node: qlast.IfElse, *, ctx: TracerContext) -> None:
trace(node.if_expr, ctx=ctx)
trace(node.else_expr, ctx=ctx)
trace(node.condition, ctx=ctx)
@trace.register
def trace_Shape(node: qlast.Shape, *, ctx: TracerContext) -> None:
if isinstance(node.expr, qlast.Path):
tip = trace(node.expr, ctx=ctx)
orig_prefix = ctx.path_prefix
if tip is not None:
ctx.path_prefix = tip.get_name(ctx.schema)
else:
ctx.path_prefix = None
else:
trace(node.expr, ctx=ctx)
for element in node.elements:
trace(element, ctx=ctx)
if isinstance(node.expr, qlast.Path):
ctx.path_prefix = orig_prefix
@trace.register
def trace_ShapeElement(node: qlast.ShapeElement, *,
ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
for element in node.elements:
trace(element, ctx=ctx)
trace(node.where, ctx=ctx)
for element in node.orderby:
trace(element, ctx=ctx)
trace(node.offset, ctx=ctx)
trace(node.limit, ctx=ctx)
trace(node.compexpr, ctx=ctx)
@trace.register
def trace_Select(node: qlast.SelectQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.result, ctx=ctx)
if node.where is not None:
trace(node.where, ctx=ctx)
if node.orderby:
for expr in node.orderby:
trace(expr, ctx=ctx)
if node.offset is not None:
trace(node.offset, ctx=ctx)
if node.limit is not None:
trace(node.limit, ctx=ctx)
@trace.register
def trace_SortExpr(node: qlast.SortExpr, *, ctx: TracerContext) -> None:
trace(node.path, ctx=ctx)
@trace.register
def trace_InsertQuery(node: qlast.InsertQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.subject, ctx=ctx)
for element in node.shape:
trace(element, ctx=ctx)
@trace.register
def trace_UpdateQuery(node: qlast.UpdateQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.subject, ctx=ctx)
for element in node.shape:
trace(element, ctx=ctx)
trace(node.where, ctx=ctx)
@trace.register
def trace_DeleteQuery(node: qlast.DeleteQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.subject, ctx=ctx)
if node.where is not None:
trace(node.where, ctx=ctx)
if node.orderby:
for expr in node.orderby:
trace(expr, ctx=ctx)
if node.offset is not None:
trace(node.offset, ctx=ctx)
if node.limit is not None:
trace(node.limit, ctx=ctx)
@trace.register
def trace_DescribeStmt(
node: qlast.DescribeStmt, *,
ctx: TracerContext,
) -> None:
if node.object:
fq_name = node.object.name
if node.object.module:
fq_name = f'{node.object.module}::{fq_name}'
ctx.refs.add(fq_name)
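# Usage sketch (illustrative only, not used by the tracer itself): the
# lightweight stand-ins defined above can be wired into a tiny schema
# fragment, mirroring the getptr()/get_target() lookups done in trace_Path.
# The type and pointer names below are made up for the example.
def _stub_schema_example():
    user = ObjectType(sn.Name(module='default', name='User'))
    group = ObjectType(sn.Name(module='default', name='Group'))
    user.pointers['groups'] = Pointer('groups', source=user, target=group)
    # these stubs ignore the schema argument, hence None
    return user.getptr(None, 'groups').get_target(None) is group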
| 27.984032 | 78 | 0.604922 | 1,513 | 0.107917 | 0 | 0 | 10,029 | 0.715335 | 0 | 0 | 1,460 | 0.104137 |
7df75836ee916a28f4a031535dcb56b53a8daeb4 | 255 | py | Python | libraries/website/docs/snippets/envs/tree_to_list.py | justindujardin/mathy | 776ac528d4586d6ea558a7212adb3559ea487a3c | [
"MIT"
]
| 95 | 2020-01-02T23:02:34.000Z | 2022-03-08T18:57:24.000Z | libraries/website/docs/snippets/envs/tree_to_list.py | justindujardin/mathy | 776ac528d4586d6ea558a7212adb3559ea487a3c | [
"MIT"
]
| 44 | 2020-01-05T03:07:45.000Z | 2021-08-11T20:45:53.000Z | libraries/website/docs/snippets/envs/tree_to_list.py | justindujardin/mathy | 776ac528d4586d6ea558a7212adb3559ea487a3c | [
"MIT"
]
| 5 | 2020-04-04T16:46:36.000Z | 2022-03-30T08:10:02.000Z | from typing import List
from mathy_core import ExpressionParser, MathExpression
parser = ExpressionParser()
expression: MathExpression = parser.parse("4 + 2x")
nodes: List[MathExpression] = expression.to_list()
# len([4,+,2,*,x])
assert len(nodes) == 5
| 25.5 | 55 | 0.74902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.101961 |
7df75aa4524bb4f5a708857ab0d660fb8ccedfb8 | 603 | py | Python | math/0x04-convolutions_and_pooling/test/2-main.py | cbarros7/holbertonschool-machine_learning | 1edb4c253441f6319b86c9c590d1e7dd3fc32bf4 | [
"MIT"
]
| 1 | 2022-03-09T19:12:22.000Z | 2022-03-09T19:12:22.000Z | math/0x04-convolutions_and_pooling/test/2-main.py | cbarros7/holbertonschool-machine_learning | 1edb4c253441f6319b86c9c590d1e7dd3fc32bf4 | [
"MIT"
]
| null | null | null | math/0x04-convolutions_and_pooling/test/2-main.py | cbarros7/holbertonschool-machine_learning | 1edb4c253441f6319b86c9c590d1e7dd3fc32bf4 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
convolve_grayscale_padding = __import__(
'2-convolve_grayscale_padding').convolve_grayscale_padding
if __name__ == '__main__':
dataset = np.load('../../supervised_learning/data/MNIST.npz')
images = dataset['X_train']
print(images.shape)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
images_conv = convolve_grayscale_padding(images, kernel, (2, 4))
print(images_conv.shape)
plt.imshow(images[0], cmap='gray')
plt.show()
plt.imshow(images_conv[0], cmap='gray')
plt.show()
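    # Rough shape check (an assumption about the helper, not taken from it):
    # with symmetric zero-padding of (2, 4), a 3x3 kernel and stride 1, a
    # 28x28 MNIST image maps to (28 + 2*2 - 3 + 1) x (28 + 2*4 - 3 + 1),
    # i.e. images_conv.shape should come out as (m, 30, 34).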
| 27.409091 | 68 | 0.6733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.207297 |
7df78eabcc3fb72c6b36049cdb0e6b3517bdbd8a | 2,950 | py | Python | code.py | surojitnath/olympic-hero | aee1ddf291bf5097fa7fd5442483fbbe87ec001f | [
"MIT"
]
| null | null | null | code.py | surojitnath/olympic-hero | aee1ddf291bf5097fa7fd5442483fbbe87ec001f | [
"MIT"
]
| null | null | null | code.py | surojitnath/olympic-hero | aee1ddf291bf5097fa7fd5442483fbbe87ec001f | [
"MIT"
]
| null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data=pd.read_csv(path)
data.rename(columns={'Total':'Total_Medals'},inplace =True)
data.head(10)
#Code starts here
# --------------
try:
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])
#print(data['Better_Event'])
Total_Count=data['Better_Event'].value_counts()
if(Total_Count[0]>Total_Count[1]):
better_event='Summer'
print(better_event)
print(data)
else:
better_event='Winter'
print(better_event)
except:
print("code Failed")
else:
print("code passed Successfully")
# --------------
#Code starts here
top_countries= data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
#print(top_countries)
def top_ten(Col):
country_list= list((data.nlargest(11,Col)['Country_Name']))
country_list=country_list[1:]
print(country_list)
return country_list
top_10_summer=top_ten('Total_Summer')
top_10_winter =top_ten('Total_Winter')
top_10 =top_ten('Total_Medals')
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print("common",common)
# --------------
#Code starts here
summer_df =data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df =data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1=data[:-1]
data_1['Total_Points']=pd.Series(data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total'])
print(data_1['Total_Points'])
most_points = max(data_1['Total_Points'])
print(most_points)
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(most_points)
print(best_country)
# --------------
#Code starts here
best = pd.DataFrame(data[data['Country_Name']==best_country])
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
# Rotate X-axes labels
plt.xticks(rotation=45)
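# Worked example of the scoring rule above (illustrative numbers): a country
# with 10 gold, 5 silver and 3 bronze medals gets 10*3 + 5*2 + 3 = 43 points.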
| 27.570093 | 109 | 0.694915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,149 | 0.389492 |
7df8cceb59a2bcfb8715aedd4215b42ada0971fd | 7,096 | py | Python | planes/kissSlope/kissSlopeWing2.py | alexpGH/blenderCadCamTools | 1db2a750ed227d46e174350a2e37c4951c669867 | [
"MIT"
]
| 3 | 2020-12-28T11:58:26.000Z | 2021-05-31T03:03:04.000Z | planes/kissSlope/kissSlopeWing2.py | alexpGH/blenderCadCamTools | 1db2a750ed227d46e174350a2e37c4951c669867 | [
"MIT"
]
| null | null | null | planes/kissSlope/kissSlopeWing2.py | alexpGH/blenderCadCamTools | 1db2a750ed227d46e174350a2e37c4951c669867 | [
"MIT"
]
| null | null | null | import bpy
import math
import numpy as np
#=== add scripts dir to path
import sys
import os
#=== define path of scripts dir
libDir=bpy.path.abspath("//../../scripts/") # version1: relative to current file
#libDir="/where/you/placed/blenderCadCam/scripts/" #version 2: usa an absolute path
if not libDir in sys.path:
sys.path.append(libDir)
#=== add local dir to path
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
#print(sys.path)
#=== blender imports only once even if the file change. if we edit outsde, we need to force a reload
from importlib import reload
#=== import scripts modules
import wingLib
reload(wingLib)
#===================================================================================================
#===
#===================================================================================================
if 0:
import ipdb
ipdb.set_trace()
ipdb.set_trace(context=5)
if 1:
#=== delete all but camera and lamp to start from a clean scene collection
wingLib.deleteAllButNames(['outl','outl2','myWing1','myWing2'])
#===================================================================================================
#=== basic geometry definition
#===================================================================================================
foilwidth=1.6
chAdditive=0.06 #added as a constant to the chordlength to generate a (towards the tip) increasing over-elliptic chord
chordlength=0.17
nSec=41*2
halfSpan=foilwidth/2.0
if 1:
#=============================================================
#=== prepare profiles
#=============================================================
f=libDir+'/AG25_resampled.dat'
cAG25, leAG25=wingLib.foilImport(f,'auto')
f=libDir+'/AG26_resampled.dat'
cAG26, leAG26=wingLib.foilImport(f,'auto')
f=libDir+'/AG14_resampled.dat'
cAG14, leAG14=wingLib.foilImport(f,'auto')
#f=libDir+'/AG27_resampled.dat'
#cAG27, leAG27=wingLib.foilImport(f,'auto')
    #=== downsampling of the root profile - we don't need a too fine resolution for the CAM model
nPoints=100
    cAG25r, leAG25r=wingLib.foildDataReduceToNpoints(cAG25,nPoints, True) #True: save trailing edge (keep 1st and last point)
pAG25r=wingLib.curveBezierFromPoints(cAG25r,'PAG25r',True,True)
#=== get & interpolate the outer profile on the root (necessary for morphing)
pAG26=wingLib.curveBezierFromPoints(cAG26,'PAG26',True,True)
pAG14=wingLib.curveBezierFromPoints(cAG14,'PAG14',True,True)
#pAG27=wingLib.curveBezierFromPoints(cAG27,'PAG27',True,True)
cAG14r=wingLib.interpolateBezier2on1(pAG25r, pAG14, leAG25r, leAG14, 40)
cAG26r=wingLib.interpolateBezier2on1(pAG25r, pAG26, leAG25r, leAG26, 40)
#cAG27_=wingLib.interpolateBezier2on1(pAG25, pAG27, leAG25, leAG27, 40)
#=== plot for check:
if 0:
pAG25=wingLib.curveBezierFromPoints(cAG25,'PAG25',True,True)
pAG14r=wingLib.curveBezierFromPoints(cAG14_,'PG14r',True,True)
pAG26r=wingLib.curveBezierFromPoints(cAG26_,'ProfileAG26r',True,True)
#=== clean-up
if 1:
wingLib.deleteByName('PAG25r')
wingLib.deleteByName('PAG14')
wingLib.deleteByName('PAG26')
# compile the coord dict for easy access
cDict={
"AG25": cAG25r,
"AG26": cAG26r,
"AG14": cAG14r,
#"AG27": cAG27_,
}
#=============================================================
#=== prepare base sections settings
#=============================================================
baseSectionsL=[]
baseSectionsL.append({"p":'AG25', "s":0.00*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG25', "s":0.05*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG26', "s":0.40*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG14', "s":0.95*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'})
baseSectionsL.append({"p":'AG14', "s":1.00*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'})
#=============================================================
#=== chordlength distribution
#=============================================================
#=== define section-wise ch extension
dChL=[]
dChL.append({"s": 0.00*halfSpan, "dy": chAdditive})
dChL.append({"s": 0.40*halfSpan, "dy": chAdditive})
dChL.append({"s": 0.95*halfSpan, "dy": chAdditive})
dChL.append({"s": 1.00*halfSpan, "dy": chAdditive})
#=== ellipse parameters
a=halfSpan
b=(chordlength-chAdditive)/2.0
#=== get/init the wing Data object
# for morphed profiles, le is the same
wingData=wingLib.WingFromSections(cDict, leAG25r, baseSectionsL, halfSpan, a, b, dChL)
if 1:
#=== get data for indivudual CAM sections
# get basic ellipse arc points in 1st and 2nd quadrant (the unshifted leading edge) and chordlength
x,y=wingLib.ellipseParamV(a,b,nSec)
ch=np.multiply(y,2.0)#
#==adapted chordlength
ch=wingLib.chordExtensionLinear(ch, x, dChL)
#shellthickness
#thickness=1.0
#=== set 2d profile to be used (gives us a function reference used later)
func4coords=wingData.coords
quality='none'
#plot Re(span)
if 0:
v=8.0# determined from stall velocity, see e.g. https://alexpgh.github.io/foss-toolchain-mpcnc/blenderKissSlope/#wing-loading-and-re
v2=9.7
#v3=15.0
#v4=30.0
#v5=45.0
nu=1.52E-05
outFile=bpy.path.abspath("//Fig_ReSpan_fast.png")
Re=[]
Re.append(np.multiply(ch,v/nu))
Re.append(np.multiply(ch,v2/nu))
#Re.append(np.multiply(ch,v3/nu))
#Re.append(np.multiply(ch,v4/nu))
#Re.append(np.multiply(ch,v5/nu))
numpy_array = np.array(Re)
transpose = numpy_array.T
#legend=[str(v)+' m/s', str(v2), str(v3),str(v4),str(v5)]
legend=[]
#n=int(len(Re)/2)+1
n=int(transpose.shape[0]/2)+1
#import ipdb
#ipdb.set_trace()
#ipdb.set_trace(context=5)
#wingLib.plotArray(x[0:n],Re[0:n],'Re(span)',outFile)
#wingLib.plotArray(x,Re,'Re(span)',outFile)
wingLib.plotArray(x[0:n],transpose[0:n,:],'Re(span)', legend, outFile)
import ipdb
ipdb.set_trace()
ipdb.set_trace(context=5)
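    # Rough worked example of the Re magnitudes involved (illustration only,
    # not computed here): the root chord is 2*b + chAdditive = 0.11 + 0.06
    # = 0.17 m, so with v = 8 m/s and nu = 1.52e-5 m^2/s the root Reynolds
    # number is about 0.17 * 8 / 1.52e-5 ~ 9e4, shrinking towards the tip
    # together with the chord.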
#=== leading edge shift definition
LeShiftL=[]
LeShiftL.append(wingLib.LeShift('elliptic',0.04, 0.5, 1.0,foilwidth/2.0))
ysh=wingLib.applyLeShifts(x,y, LeShiftL)
#placeSections(x,ysh,ch)
sectionNames=wingLib.placeSectionsMinLimited(x,ysh,ch,0.001,func4coords,quality)
if 1:
wingLib.bridgeListOfEdgeLoopsCloseOuterWithFace(sectionNames,'myWing')
#shift to origin
bpy.context.object.location[1] = -chordlength/2.0
bpy.context.object.location[2] = 0.0
| 29.322314 | 140 | 0.567644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,396 | 0.478579 |
7dfb15185b5928b42e0c69caa80b31116a8fea1a | 1,715 | py | Python | saleor/order/migrations/0072_django_price_2.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
]
| 19 | 2019-12-03T17:28:07.000Z | 2021-09-10T21:30:52.000Z | saleor/order/migrations/0072_django_price_2.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
]
| 51 | 2019-12-06T08:06:07.000Z | 2021-05-06T02:10:50.000Z | saleor/order/migrations/0072_django_price_2.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
]
| 20 | 2020-02-03T00:38:59.000Z | 2022-01-03T13:07:52.000Z | # Generated by Django 2.2.4 on 2019-08-14 09:13
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("order", "0071_order_gift_cards")]
operations = [
migrations.RenameField(
model_name="order",
old_name="shipping_price_gross",
new_name="shipping_price_gross_amount",
),
migrations.RenameField(
model_name="order",
old_name="shipping_price_net",
new_name="shipping_price_net_amount",
),
migrations.RenameField(
model_name="order", old_name="total_gross", new_name="total_gross_amount"
),
migrations.RenameField(
model_name="order", old_name="total_net", new_name="total_net_amount"
),
migrations.RenameField(
model_name="orderline",
old_name="unit_price_gross",
new_name="unit_price_gross_amount",
),
migrations.RenameField(
model_name="orderline",
old_name="unit_price_net",
new_name="unit_price_net_amount",
),
migrations.AddField(
model_name="order",
name="currency",
field=models.CharField(
default=settings.DEFAULT_CURRENCY,
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
),
),
migrations.AddField(
model_name="orderline",
name="currency",
field=models.CharField(
default=settings.DEFAULT_CURRENCY,
max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
),
),
]
| 31.181818 | 85 | 0.58484 | 1,589 | 0.926531 | 0 | 0 | 0 | 0 | 0 | 0 | 407 | 0.237318 |
7dfb769eb03d5be318cb102a630728947e956816 | 9,382 | py | Python | miping/training/features.py | mclgoerg/MiningPersonalityInGerman | 4c5811a0f72100b7afef9695475a6de9251444b7 | [
"Apache-2.0"
]
| 1 | 2020-09-11T01:11:19.000Z | 2020-09-11T01:11:19.000Z | miping/training/features.py | mclgoerg/MiningPersonalityInGerman | 4c5811a0f72100b7afef9695475a6de9251444b7 | [
"Apache-2.0"
]
| null | null | null | miping/training/features.py | mclgoerg/MiningPersonalityInGerman | 4c5811a0f72100b7afef9695475a6de9251444b7 | [
"Apache-2.0"
]
| 2 | 2020-08-12T15:57:06.000Z | 2020-12-17T18:11:03.000Z | import numpy as np
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from ..models.profile import Profile
from ..interfaces.helper import Helper
from ..interfaces.glove import GloVe
from .noGloveValueError import NoGloveValueError
class Features:
"""
Contains all pipeline functions for both LIWC and glove.
"""
def __init__(
self,
):
return
def featureLIWC(
self,
profileCol,
):
"""
Extract LIWC features (namely LIWC categories) from
each profile in list as feature.
Parameters
----------
profileCol : list, default=None, required
List with profiles to generate features for.
Returns
-------
np.array(outputList) : numpy.array
Generated features in numpy format.
"""
# will contain the LIWC measures for each profile
outputList = []
# loop over profileCollection
for profile in profileCol:
# create row
liwc_data = []
# get names of liwc categories
for attrName in Profile.liwc_category_list:
# get value of current category
attr = getattr(profile, attrName)
# append to current profile
# and convert to float
liwc_data.append(np.float(attr))
outputList.append(liwc_data)
# create numpy array, as scikit needs this format
return np.array(outputList)
def createLIWCFeaturePipeline(
self,
):
"""
Create pipeline that can be passed into multiple training procceses
this is just a blueprint for calculating the features
no features are calculated yet!
Returns
-------
featurePipeline : Pipeline
Pipeline containing feature generation and scaling.
"""
# Create skicit-learn compatible FunctionTransformers
# for usage with other sklearn functions
# featureLIWC is the name of the function to be called to
# extract features
liwc_Trans = FunctionTransformer(self.featureLIWC, validate=False)
# Combine feature(s) with FeatureUnion
featureTransformer = FeatureUnion([
('liwc', liwc_Trans),
], n_jobs=-1) # parallelize via multiprocess
# combine into a pipeline including scaling
featurePipeline = Pipeline([
('features', featureTransformer),
("stdScaler", StandardScaler())
])
return featurePipeline
def _condenseGloVeVectors(
self,
vectorList,
):
"""
For each user a vectorList is passed in with different length.
This will be condensed into a single 900 dim vector.
"""
# convert to np array for mean,max,min functions
vectorList = np.array(vectorList)
# correct structure from (1,x,300) to (x,300)
vectorList = vectorList[0]
# for each dimension identify mean,max,min
# and save in separate vector
meanVector = vectorList.mean(axis=0)
maxVector = np.amax(a=vectorList, axis=0)
minVector = np.amin(a=vectorList, axis=0)
# combine all 300 dim vectors in 900 dim vector
returnVector = []
returnVector.extend(meanVector)
returnVector.extend(maxVector)
returnVector.extend(minVector)
# convert to numpy array for scikit
returnVector = np.array(returnVector)
return returnVector
def featureGloVe(
self,
profileList,
):
"""
For each profile in profile list generate GloVe features.
Each profile contains text and for this text the glove vectors
are retrieved and condensed into one single vector for this user.
All user vectors are appended into the outputList.
The word coverageStatistics and wordCounts for each user
are saved in this feature object instance to be retrieved later.
Parameters
----------
profileList : list, default=None, required
List containing relevant profiles for which to extract features.
Returns
-------
np.array(outputList) : numpy.array
Features in correct output format.
"""
if self.glove is None:
raise Exception("GloVe not loaded.")
# will contain the GloVe measures for each profile
outputList = []
# get index as list, for faster lookup
index_as_list = self.glove.get_index_list()
# initialize progress bar
helper = Helper()
numProfiles = len(profileList)
helper.printProgressBar(
0,
numProfiles,
prefix='Progress:',
suffix='Complete',
length=50
)
# list for saving coverage statistics
coverageStatistics = []
# word count, that are included, for profiles
wordCounts = []
# loop over profileList
for num, profile in enumerate(profileList):
# tokenize text in tweets
# separated by space
tokens = profile.text.split(' ')
profile_vectors = []
# for each word lookup glove vector
# if no match -> ignore it
# first identify set of words not in glove
not_in_glove = set(np.setdiff1d(tokens, index_as_list))
# get words in glove, indcluding duplicates
# so if words exist n times in text, they will be n times in list
in_glove = [word for word in tokens if word not in not_in_glove]
if len(in_glove) == 0:
                # no word could be found in GloVe
# raise Exception
eString = (
"Could not find any glove values for given words"
)
raise NoGloveValueError(eString)
else:
                # at least one word was found
# lookup glove vectors
# should return duplicates!
glove_values = self.glove.getGloVeByWordList(
wordList=in_glove
)
converted_vals = np.array(glove_values)
# add vectors to list of this profile's vectors
profile_vectors.append(converted_vals)
# fill coverage statistics as share of tokens (=words)
# that exist in glove in comparison to total tokens
profile_coverage = len(converted_vals) / len(tokens)
# add to global list
coverageStatistics.append(profile_coverage)
wordCounts.append(len(tokens))
# after all vectors for this profile are retrieved
# condense with maximum, minimum, average in 900 dim vector
final_vector = self._condenseGloVeVectors(profile_vectors)
# add 900 dim to output list
outputList.append(final_vector)
# Update Progress Bar
helper.printProgressBar(
num + 1,
numProfiles,
prefix='Progress:',
suffix='Complete',
length=50
)
# save coverage statistics in class attribute to be accessible
self.coverageStatistics = coverageStatistics
self.wordCounts = wordCounts
# create numpy array, as scikit needs this format
return np.array(outputList)
def createGloVeFeaturePipeline(
self,
glovePath='data/glove/glove.db',
dataBaseMode=True,
):
"""
Create pipeline that can be passed into multiple training procceses
this is just a blueprint for calculating the features
no features are calculated yet!
No parallelization (n_jobs=1) due to GloVe lookup in database.
Parameters
----------
glovePath : string, default='data/glove/glove.db'
Path to GloVe flat or database file.
dataBaseMode : boolean, default=True
If True path points to SQLite database file.
Returns
-------
featurePipeline : Pipeline
Pipeline containing feature generation.
"""
glove = GloVe(
filePath=glovePath,
dataBaseMode=dataBaseMode,
)
self.glove = glove
# Create skicit-learn compatible FunctionTransformers
# for usage with other sklearn functions
# featureGloVe is the name of the function to be called to
# extract features
glove_Trans = FunctionTransformer(self.featureGloVe, validate=False)
# Combine feature(s) with FeatureUnion
featureTransformer = FeatureUnion([
('glove', glove_Trans),
], n_jobs=1) # no parallelization
# combine into a pipeline, no scaling since GloVe is scaled
featurePipeline = Pipeline([
('features', featureTransformer)
])
return featurePipeline
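# Usage sketch (illustrative only): both builders return a scikit-learn
# Pipeline, so no features are computed until the pipeline is applied to
# data. `profiles` is assumed to be a list of miping Profile objects with
# their LIWC category attributes populated.
def _liwc_pipeline_example(profiles):
    features = Features()
    pipeline = features.createLIWCFeaturePipeline()
    # fit_transform extracts the LIWC categories per profile and scales them
    return pipeline.fit_transform(profiles)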
| 32.351724 | 77 | 0.588361 | 9,013 | 0.960669 | 0 | 0 | 0 | 0 | 0 | 0 | 4,574 | 0.487529 |
7dfc55af75328775b1d9e9abc358301541231f7c | 1,383 | py | Python | tests/unit/test_serializers.py | launchpadrecruits/placebo | 7b6db70a341d935a2e250b76d1ea47e56e8c9d92 | [
"Apache-2.0"
]
| 1 | 2019-06-10T13:52:41.000Z | 2019-06-10T13:52:41.000Z | tests/unit/test_serializers.py | launchpadrecruits/placebo | 7b6db70a341d935a2e250b76d1ea47e56e8c9d92 | [
"Apache-2.0"
]
| 1 | 2018-10-01T13:11:50.000Z | 2018-10-01T13:11:50.000Z | tests/unit/test_serializers.py | launchpadrecruits/lpr-placebo | 7b6db70a341d935a2e250b76d1ea47e56e8c9d92 | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
import json
from placebo.serializer import serialize, deserialize, utc
date_sample = {
"LoginProfile": {
"UserName": "baz",
"CreateDate": datetime.datetime(2015, 1, 4, 9, 1, 2, 0, tzinfo=utc),
}
}
date_json = """{"LoginProfile": {"CreateDate": {"__class__": "datetime", "day": 4, "hour": 9, "microsecond": 0, "minute": 1, "month": 1, "second": 2, "year": 2015}, "UserName": "baz"}}"""
class TestSerializers(unittest.TestCase):
def test_datetime_to_json(self):
result = json.dumps(date_sample, default=serialize, sort_keys=True)
self.assertEqual(result, date_json)
def test_datetime_from_json(self):
response = json.loads(date_json, object_hook=deserialize)
self.assertEqual(response, date_sample)
| 33.731707 | 187 | 0.707158 | 353 | 0.255242 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.566884 |
7dfc5fe7b48790825f5784ca8956028cbaaac9a8 | 1,267 | py | Python | Chapter11/web_03.py | vabyte/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
]
| 84 | 2018-08-09T09:30:03.000Z | 2022-01-04T23:20:38.000Z | Chapter11/web_03.py | jiro74/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
]
| 1 | 2019-11-04T18:57:40.000Z | 2020-09-07T08:52:25.000Z | Chapter11/web_03.py | jiro74/Modern-Python-Standard-Library-Cookbook | 4f53e3ab7b61aca1cca9343e7421e170280cd5b5 | [
"MIT"
]
| 33 | 2018-09-26T11:05:55.000Z | 2022-03-15T10:31:10.000Z | import urllib.request
import urllib.parse
import json
def http_request(url, query=None, method=None, headers={}, data=None):
"""Perform an HTTP request and return the associated response."""
parts = vars(urllib.parse.urlparse(url))
if query:
parts['query'] = urllib.parse.urlencode(query)
url = urllib.parse.ParseResult(**parts).geturl()
r = urllib.request.Request(url=url, method=method, headers=headers,
data=data)
with urllib.request.urlopen(r) as resp:
msg, resp = resp.info(), resp.read()
if msg.get_content_type() == 'application/json':
resp = json.loads(resp.decode('utf-8'))
return msg, resp
if __name__ == '__main__':
msg, resp = http_request(
'https://httpbin.org/get',
query={
'a': 'Hello',
'b': 'World'
}
)
print(msg.get_content_type(), resp)
msg, resp = http_request('https://httpbin.org/bytes/16')
print(msg.get_content_type(), resp)
msg, resp = http_request('https://httpbin.org/post', method='POST',
data='This is my posted data!'.encode('ascii'),
headers={'Content-Type': 'text/plain'})
print(msg.get_content_type(), resp) | 31.675 | 75 | 0.594317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.21468 |
7dfda8cef5923a2a0d78158e8c874838389cfd46 | 3,678 | py | Python | src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
]
| 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
]
| 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
]
| 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExternalMaster(object):
"""
An external master name server used as the source of zone data.
"""
def __init__(self, **kwargs):
"""
Initializes a new ExternalMaster object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param address:
The value to assign to the address property of this ExternalMaster.
:type address: str
:param port:
The value to assign to the port property of this ExternalMaster.
:type port: int
:param tsig_key_id:
The value to assign to the tsig_key_id property of this ExternalMaster.
:type tsig_key_id: str
"""
self.swagger_types = {
'address': 'str',
'port': 'int',
'tsig_key_id': 'str'
}
self.attribute_map = {
'address': 'address',
'port': 'port',
'tsig_key_id': 'tsigKeyId'
}
self._address = None
self._port = None
self._tsig_key_id = None
@property
def address(self):
"""
**[Required]** Gets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:return: The address of this ExternalMaster.
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""
Sets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:param address: The address of this ExternalMaster.
:type: str
"""
self._address = address
@property
def port(self):
"""
Gets the port of this ExternalMaster.
The server's port. Port value must be a value of 53, otherwise omit
the port value.
:return: The port of this ExternalMaster.
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this ExternalMaster.
The server's port. Port value must be a value of 53, otherwise omit
the port value.
:param port: The port of this ExternalMaster.
:type: int
"""
self._port = port
@property
def tsig_key_id(self):
"""
Gets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:return: The tsig_key_id of this ExternalMaster.
:rtype: str
"""
return self._tsig_key_id
@tsig_key_id.setter
def tsig_key_id(self, tsig_key_id):
"""
Sets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:param tsig_key_id: The tsig_key_id of this ExternalMaster.
:type: str
"""
self._tsig_key_id = tsig_key_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 27.244444 | 245 | 0.609027 | 3,138 | 0.853181 | 0 | 0 | 3,168 | 0.861338 | 0 | 0 | 2,370 | 0.644372 |
7dff476f5b07538c175407ac6793f4c21aad8c8f | 899 | py | Python | cinder/tests/unit/backup/fake_service_with_verify.py | puremudassir/cinder | 99aad0d094e726d328ea815cea8ebdc14957da8c | [
"Apache-2.0"
]
| null | null | null | cinder/tests/unit/backup/fake_service_with_verify.py | puremudassir/cinder | 99aad0d094e726d328ea815cea8ebdc14957da8c | [
"Apache-2.0"
]
| null | null | null | cinder/tests/unit/backup/fake_service_with_verify.py | puremudassir/cinder | 99aad0d094e726d328ea815cea8ebdc14957da8c | [
"Apache-2.0"
]
| null | null | null | # Copyright (C) 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.backup import driver
from cinder.tests.unit.backup import fake_service
class FakeBackupServiceWithVerify(driver.BackupDriverWithVerify,
fake_service.FakeBackupService):
def verify(self, backup):
pass
| 37.458333 | 78 | 0.719689 | 174 | 0.193548 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.694105 |
b400a722c717d6322475d075e5e6ca07343e213f | 2,195 | py | Python | src/fasttick.py | JevinJ/Bittrex-Notify | ea1057fa2fd59d191893eb7a4c31f35db789ba29 | [
"MIT"
]
| 12 | 2017-08-15T08:40:44.000Z | 2018-01-30T20:55:20.000Z | src/fasttick.py | alimogh/BittrexNotify | ea1057fa2fd59d191893eb7a4c31f35db789ba29 | [
"MIT"
]
| 5 | 2017-08-30T15:46:03.000Z | 2018-02-16T09:18:27.000Z | src/fasttick.py | alimogh/BittrexNotify | ea1057fa2fd59d191893eb7a4c31f35db789ba29 | [
"MIT"
]
| 3 | 2017-08-28T17:58:03.000Z | 2017-12-05T02:05:18.000Z | import config
import misc
def heartbeat():
"""
Processes data from Bittrex into a simpler dictionary,
calls the save function on it, deletes the oldest
    saved dictionary (if it's out of lookback range), and finally
creates a list of the best coins to be used in tkinter listboxes.
:return: A list containing triples of (coin name, increase rate, volume)
"""
data = misc.retrieve_data()
# Processing for saving latest data from Bittrex API
latest_data = {}
for d in data.get('result', {}):
name = d.get('Market', {}).get('MarketCurrencyLong', '')
last_price = d.get('Summary', {}).get('Last', 0.0)
last_vol = d.get('Summary', {}).get('BaseVolume', 0.0)
base_currency = d.get('Market', {}).get('BaseCurrency', '')
if base_currency == 'BTC' and last_price >= \
config.FASTTICK_MIN_PRICE and last_vol >= config.FASTTICK_MIN_VOL:
latest_data[name] = {'Summary': d['Summary']}
# Processing all data within 9 ticks + latest and returning
# rate for output in GUI
prev_data = list(misc.open_pickles('fasttick_history', config.FASTTICK_LOOKBACK))
prev_data.append(latest_data)
ticker_data = []
if prev_data:
for name in latest_data:
prev_changes = []
for i in range(len(prev_data)-1):
old_price = float(prev_data[i].get(name, {}).get('Summary', {}).get('Last', 0.0))
new_price = float(prev_data[i+1].get(name, {}).get('Summary', {}).get('Last', 0.0))
if old_price != 0:
change = (((new_price - old_price) / old_price) * 100)
prev_changes.append(change)
if prev_changes:
volume = float(latest_data.get(name, {}).get('Summary', {}).get('BaseVolume', 0.0))
average_rate = float((sum(prev_changes) / len(prev_changes)))
if average_rate >= config.FASTTICK_MIN_RATE:
ticker_data.append((name, average_rate, volume))
misc.save_pickle(latest_data, 'fasttick_history')
misc.delete_ancient_pickles('fasttick_history', config.FASTTICK_LOOKBACK)
return ticker_data | 45.729167 | 99 | 0.615034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 698 | 0.317995 |
b4012c4378e508ce63325920dec3916fc3ec12bc | 2,325 | py | Python | src/mlb_statsapi/model/api/game.py | power-edge/mlb_statsapi_etl | 9cca2ae059e8aab98ed460e7b71ad6eeeed09ffe | [
"Apache-2.0"
]
| null | null | null | src/mlb_statsapi/model/api/game.py | power-edge/mlb_statsapi_etl | 9cca2ae059e8aab98ed460e7b71ad6eeeed09ffe | [
"Apache-2.0"
]
| null | null | null | src/mlb_statsapi/model/api/game.py | power-edge/mlb_statsapi_etl | 9cca2ae059e8aab98ed460e7b71ad6eeeed09ffe | [
"Apache-2.0"
]
| null | null | null | """
created by nikos at 4/26/21
"""
import datetime
from ..base import MLBStatsAPIEndpointModel
from mlb_statsapi.utils.stats_api_object import configure_api
YMDTHMS = '%Y-%m-%dT%H:%M:%SZ'
YYYYMMDD_HHMMSS = '%Y%m%d_%H%M%S'
MMDDYYYY_HHMMSS = '%m%d%Y_%H%M%S'
class GameModel(MLBStatsAPIEndpointModel):
date_formats = {
'updatedSince': YMDTHMS,
'timecode': YYYYMMDD_HHMMSS,
'startTimecode': MMDDYYYY_HHMMSS,
'endTimecode': MMDDYYYY_HHMMSS
}
@configure_api
def liveGameV1(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def liveGameDiffPatchV1(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def liveTimestampv11(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def currentGameStats(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def getGameContextMetrics(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def getWinProbability(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def boxscore(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def content(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def colorFeed(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def colorTimestamps(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def linescore(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def playByPlay(self, **kwargs):
return self.get_api_file_object(**kwargs)
@property
def _methods(self) -> dict: return {m.__name__: m for m in (
self.liveGameV1,
self.liveGameDiffPatchV1,
self.liveTimestampv11,
self.currentGameStats,
self.getGameContextMetrics,
self.getWinProbability,
self.boxscore,
self.content,
self.colorFeed,
self.colorTimestamps,
self.linescore,
self.playByPlay
)}
@property
def now_timestamp(self):
return datetime.datetime.now().strftime(YYYYMMDD_HHMMSS)
| 25.549451 | 64 | 0.667957 | 2,062 | 0.886882 | 0 | 0 | 1,757 | 0.755699 | 0 | 0 | 137 | 0.058925 |
b401775f5af0e9b7b7978646db33631b271d516f | 4,351 | py | Python | scripts/build_folding_map.py | tsieprawski/md4c | 9d99b1262de3353f0530ac6b31d8c6934003b61f | [
"MIT"
]
| 475 | 2016-11-27T18:37:51.000Z | 2022-03-30T19:46:29.000Z | scripts/build_folding_map.py | tsieprawski/md4c | 9d99b1262de3353f0530ac6b31d8c6934003b61f | [
"MIT"
]
| 173 | 2016-12-05T01:38:37.000Z | 2022-01-14T10:06:30.000Z | scripts/build_folding_map.py | tsieprawski/md4c | 9d99b1262de3353f0530ac6b31d8c6934003b61f | [
"MIT"
]
| 110 | 2016-11-29T20:02:16.000Z | 2022-03-30T23:51:58.000Z | #!/usr/bin/env python3
import os
import sys
import textwrap
self_path = os.path.dirname(os.path.realpath(__file__));
f = open(self_path + "/unicode/CaseFolding.txt", "r")
status_list = [ "C", "F" ]
folding_list = [ dict(), dict(), dict() ]
# Filter the foldings for "full" folding.
for line in f:
comment_off = line.find("#")
if comment_off >= 0:
line = line[:comment_off]
line = line.strip()
if not line:
continue
raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3)
if not status.strip() in status_list:
continue
codepoint = int(raw_codepoint.strip(), 16)
mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")]
mapping_len = len(mapping)
if mapping_len in range(1, 4):
folding_list[mapping_len-1][codepoint] = mapping
else:
assert(False)
f.close()
# If we assume that (index0 ... index-1) makes a range (as defined below),
# check that the newly provided index is compatible with the range too; i.e.
# verify that the range can be extended without breaking its properties.
#
# Currently, we can handle ranges which:
#
# (1) either form consecutive sequence of codepoints and which map that range
# to other consecutive range of codepoints (of the same length);
#
# (2) or a consecutive sequence of codepoints with step 2 where each codepoint
# CP is mapped to the codepoint CP+1
# (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...).
#
# Note: When the codepoints in the range are mapped to multiple codepoints,
# only the 1st mapped codepoint is considered. All the other ones have to be
# shared by all the mappings covered by the range.
def is_range_compatible(folding, codepoint_list, index0, index):
N = index - index0
codepoint0 = codepoint_list[index0]
codepoint1 = codepoint_list[index0+1]
codepointN = codepoint_list[index]
mapping0 = folding[codepoint0]
mapping1 = folding[codepoint1]
mappingN = folding[codepointN]
# Check the range type (1):
if codepoint1 - codepoint0 == 1 and codepointN - codepoint0 == N \
and mapping1[0] - mapping0[0] == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - mapping0[0] == N and mappingN[1:] == mapping0[1:]:
return True
# Check the range type (2):
if codepoint1 - codepoint0 == 2 and codepointN - codepoint0 == 2 * N \
and mapping0[0] - codepoint0 == 1 \
and mapping1[0] - codepoint1 == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - codepointN == 1 and mappingN[1:] == mapping0[1:]:
return True
return False
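# Concrete illustration of the two range kinds handled above:
#   type (1): 0x0041->0x0061, 0x0042->0x0062, 0x0043->0x0063, ...
#             consecutive codepoints mapping onto an equally long consecutive run;
#   type (2): 0x0100->0x0101, 0x0102->0x0103, 0x0104->0x0105, ...
#             codepoints stepping by 2, each mapping to the codepoint right after it.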
def mapping_str(list, mapping):
return ",".join("0x{:04x}".format(x) for x in mapping)
for mapping_len in range(1, 4):
folding = folding_list[mapping_len-1]
codepoint_list = list(folding)
index0 = 0
count = len(folding)
records = list()
data_records = list()
while index0 < count:
index1 = index0 + 1
while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1):
index1 += 1
if index1 - index0 > 2:
# Range of codepoints
records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]]))
index0 = index1
else:
# Single codepoint
records.append("S(0x{:04x})".format(codepoint_list[index0]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
index0 += 1
sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
| 35.958678 | 107 | 0.6302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,198 | 0.275339 |
b4024d84d4513279dde8eeb7b78e3491e9770d6e | 1,038 | py | Python | app/api/v1/models/user_model.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
]
| null | null | null | app/api/v1/models/user_model.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
]
| null | null | null | app/api/v1/models/user_model.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
]
| 1 | 2019-02-05T07:44:19.000Z | 2019-02-05T07:44:19.000Z | users = []
class UserModel(object):
"""Class user models."""
def __init__(self):
self.db = users
def add_user(self, fname, lname, email, phone, password, confirm_password, city):
""" Method for saving user to the dictionary """
payload = {
"userId": len(self.db)+1,
"fname": fname,
"lname": lname,
"email": email,
"phone": phone,
"password": password,
"confirm_password": confirm_password,
"city": city,
}
self.db.append(payload)
return self.db
def check_email(self, email):
"""Method for checking if user email exist"""
user = [user for user in users if user['email'] == email]
if user:
return True
return False
def check_user(self, userId):
"""Method for checking if user exist"""
user = [user for user in users if user['userId'] == userId]
if user:
return True
return False
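# Usage sketch (illustrative values only): the model is a thin wrapper around
# the module-level `users` list.
def _user_model_example():
    model = UserModel()
    model.add_user('Jane', 'Doe', 'jane@example.com', '0700000000',
                   'secret', 'secret', 'Nairobi')
    return model.check_email('jane@example.com')  # True once the user is saved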
| 26.615385 | 85 | 0.531792 | 1,023 | 0.985549 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.232177 |
b402736fe41a1923f5e1f2be2b9ac727b56303ec | 6,644 | py | Python | Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py | JoaquinRodriguez2006/RoboCup_Junior_Material | 04f295010272fb8287c8f214bf69f1a61ee2b7cf | [
"MIT"
]
| null | null | null | Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py | JoaquinRodriguez2006/RoboCup_Junior_Material | 04f295010272fb8287c8f214bf69f1a61ee2b7cf | [
"MIT"
]
| null | null | null | Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py | JoaquinRodriguez2006/RoboCup_Junior_Material | 04f295010272fb8287c8f214bf69f1a61ee2b7cf | [
"MIT"
]
| 1 | 2022-03-19T22:57:33.000Z | 2022-03-19T22:57:33.000Z | from controller import Robot
from controller import Motor
from controller import PositionSensor
from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter
import cv2
import numpy as np
import math
import time
robot = Robot()
timeStep = 32
tile_size = 0.12
speed = 6.28
media_baldoza = 0.06
estado = 1
start = 0
global r
global g
global b
r = 0
g = 0
b = 0
# start = robot.getTime()
# Camera initialization
camera = robot.getDevice("camera3")
camera.enable(timeStep)
# Colour sensor initialization
colour_sensor = robot.getDevice("colour_sensor")
colour_sensor.enable(timeStep)
# Distance sensor initialization
distancia_sensor1 = robot.getDevice("distance sensor1")
distancia_sensor1.enable(timeStep)
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
rIzq_encoder = ruedaIzquierda.getPositionSensor()
rDer_encoder = ruedaDerecha.getPositionSensor()
rIzq_encoder.enable(timeStep)
rDer_encoder.enable(timeStep)
# Functions
def leer_sensores():
global r
global g
global b
# Color sensor
image = colour_sensor.getImage()
r = colour_sensor.imageGetRed(image, 1, 0, 0)
g = colour_sensor.imageGetGreen(image, 1, 0, 0)
b = colour_sensor.imageGetBlue(image, 1, 0, 0)
    # blue: r=65 g=65 b=252
    # red: r=252 g=65 b=65
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
"""
# Camara
image = camera.getImage()
imagen = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))
frame = cv2.cvtColor(imagen, cv2.COLOR_BGRA2BGR)
cv2.imshow("frame", frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale
cv2.imshow("grayScale", frame)
cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold
cv2.imshow("thresh", frame)
cv2.waitKey(1)
    # Distance sensor
print("Distancia: " + str(distancia_sensor1.getValue()))
"""
def avanzar(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(vel)
def retroceder(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(-vel)
def girar_der(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(vel)
def girar_izq(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(-vel)
gyro = robot.getDevice("gyro")
gyro.enable(timeStep)
def rotar(angulo):
global angulo_actual
global tiempo_anterior
    # start the rotation
if angulo > 0:
girar_der(0.5)
else:
girar_izq(0.5)
    # Keep turning until the requested angle has been reached
if (abs(abs(angulo) - angulo_actual) > 1):
tiempo_actual = robot.getTime()
# print("Inicio rotacion angulo", angulo, "Angulo actual:",angulo_actual)
tiempo_transcurrido = tiempo_actual - \
            tiempo_anterior  # time elapsed in this timestep
        # rad/sec * msec * 1000
radsIntimestep = abs(gyro.getValues()[1]) * tiempo_transcurrido
degsIntimestep = radsIntimestep * 180 / math.pi
# print("rads: " + str(radsIntimestep) +
# " | degs: " + str(degsIntimestep))
angulo_actual += degsIntimestep
        # If it goes past 360 degrees, wrap the rotation back around from 0 degrees
angulo_actual = angulo_actual % 360
        # If it falls below 0 degrees, add 360 to bring it back into range
if angulo_actual < 0:
angulo_actual += 360
tiempo_anterior = tiempo_actual
# print("Angulo actual:", angulo_actual)
return False
#print("Rotacion finalizada.")
angulo_actual = 0
return True
def delay(ms):
initTime = robot.getTime() # Store starting time (in seconds)
while robot.step(timeStep) != -1:
print("delay")
if (robot.getTime() - initTime) * 1000.0 > ms: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def rotar_enclavado(angulo):
while robot.step(timeStep) != -1:
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
        if rotar(angulo) == True:  # stop once the requested rotation has been completed
avanzar(0)
break
def avance(tipo_avance):
start = rDer_encoder.getValue()
velocidad = 0
avance = 0
if tipo_avance == "medio":
velocidad = 3
avance = 2.9
elif tipo_avance == "largo":
avance = 5.9
velocidad = 5.96
elif tipo_avance == "esquina":
avance = 4.1
velocidad = 6.28
while robot.step(timeStep) != -1:
avanzar(velocidad)
leer_sensores()
tipo_pizza()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rDer_encoder.getValue() >= start + avance:
avanzar(0)
break
def retroceso(tipo_retroceso):
start = rDer_encoder.getValue()
velocidad = 0
retroceso = 0
if tipo_retroceso == "medio":
velocidad = 6.28
retroceso = 2.9
elif tipo_retroceso == "largo":
retroceso = 5.9
velocidad = 5.96
elif tipo_retroceso == "esquina":
retroceso = 4.1
velocidad = 6.28
elif tipo_retroceso == "poquito":
retroceso = 1.9
velocidad = 6.28
while robot.step(timeStep) != -1:
retroceder(velocidad)
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if start - retroceso >= rDer_encoder.getValue():
avanzar(0)
break
def tipo_pizza():
#print("valores(1): r:" + str(r) + " , g:" + str(g) + " , b:" + str(b))
if 255 >= r >= 240 and 60 <= b <= 75 and 60 <= g <= 75:
print("(Red)pasaje zona 3 a 1")
elif 150 >= r >= 100 and 210 <= b <= 230 and 60 <= g <= 75:
print("(Vaiolet)pasaje zona 2 a 3")
elif 60 <= r <= 75 and 255 >= b >= 245 and 60 <= g <= 75:
print("(Blue)pasaje zona 1 a 2")
elif 200 <= r <= 220 and 110 >= b >= 100 and 175 <= g <= 180:
print("Entered swamp")
return "swamp"
elif 250 >= r >= 230 and 250 >= b >= 235 and 250 >= g >= 235:
print("Found Checkpoint")
elif r == 233 and b == 233 and g == 233:
print("Azulejo normal")
elif 30 <= r <= 50 :
print("un agujero negro we")
retroceso("medio")
rotar_enclavado(90)
else:
return "prueba"
angulo_actual = 0
tiempo_anterior = robot.getTime()
contador = 0
while robot.step(timeStep) != -1:
avance("medio")
| 28.033755 | 124 | 0.615593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,043 | 0.307495 |
b403104a45ede1110a9c5cca95878c43993fc086 | 433 | py | Python | drip/migrations/0002_querysetrule_rule_type.py | RentFreeMedia/django-drip-campaigns | a71e5d3a3f242c04a6f7f921b85aa01daff467f8 | [
"MIT"
]
| 46 | 2020-07-23T17:47:33.000Z | 2021-11-25T16:57:35.000Z | drip/migrations/0002_querysetrule_rule_type.py | RentFreeMedia/django-drip-campaigns | a71e5d3a3f242c04a6f7f921b85aa01daff467f8 | [
"MIT"
]
| 54 | 2020-06-19T17:57:42.000Z | 2021-09-22T19:34:48.000Z | drip/migrations/0002_querysetrule_rule_type.py | kaozdl/django-drip | a71e5d3a3f242c04a6f7f921b85aa01daff467f8 | [
"MIT"
]
| 19 | 2020-08-30T05:29:13.000Z | 2022-02-08T20:27:17.000Z | # Generated by Django 3.0.7 on 2020-11-25 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('drip', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='querysetrule',
name='rule_type',
field=models.CharField(choices=[('or', 'Or'), ('and', 'And')], default='and', max_length=3),
),
]
| 22.789474 | 104 | 0.577367 | 340 | 0.785219 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.265589 |
b4040d06558b8483134d9ca3f4c2ab385bbdc016 | 3,393 | py | Python | venv/lib/python3.6/site-packages/cligj/__init__.py | booklover98/A-_pathfinding | 09afebfc953ce9773bc4fc781eb6d0496caccfba | [
"MIT"
]
| null | null | null | venv/lib/python3.6/site-packages/cligj/__init__.py | booklover98/A-_pathfinding | 09afebfc953ce9773bc4fc781eb6d0496caccfba | [
"MIT"
]
| 7 | 2021-06-04T23:45:15.000Z | 2022-03-12T00:44:14.000Z | virtual/Lib/site-packages/cligj/__init__.py | owenabrams/bluemoonkampala | 8801df64e91683a2641f2cd4bcbe03ebc7f40828 | [
"MIT"
]
| null | null | null | # cligj
# Shared arguments and options.
import click
from .features import normalize_feature_inputs
# Arguments.
# Multiple input files.
files_in_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS...")
# Multiple files, last of which is an output file.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS... OUTPUT")
# Features from files, command line args, or stdin.
# Returns the input data as an iterable of GeoJSON Feature-like
# dictionaries.
features_in_arg = click.argument(
'features',
nargs=-1,
callback=normalize_feature_inputs,
metavar="FEATURES...")
# Options.
verbose_opt = click.option(
'--verbose', '-v',
count=True,
help="Increase verbosity.")
quiet_opt = click.option(
'--quiet', '-q',
count=True,
help="Decrease verbosity.")
# Format driver option.
format_opt = click.option(
'-f', '--format', '--driver', 'driver',
default='GTiff',
help="Output format driver")
# JSON formatting options.
indent_opt = click.option(
'--indent',
type=int,
default=None,
help="Indentation level for JSON output")
compact_opt = click.option(
'--compact/--not-compact',
default=False,
help="Use compact separators (',', ':').")
# Coordinate precision option.
precision_opt = click.option(
'--precision',
type=int,
default=-1,
help="Decimal precision of coordinates.")
# Geographic (default), projected, or Mercator switch.
projection_geographic_opt = click.option(
'--geographic',
'projection',
flag_value='geographic',
default=True,
help="Output in geographic coordinates (the default).")
projection_projected_opt = click.option(
'--projected',
'projection',
flag_value='projected',
help="Output in dataset's own, projected coordinates.")
projection_mercator_opt = click.option(
'--mercator',
'projection',
flag_value='mercator',
help="Output in Web Mercator coordinates.")
# Feature collection or feature sequence switch.
sequence_opt = click.option(
'--sequence/--no-sequence',
default=False,
help="Write a LF-delimited sequence of texts containing individual "
"objects or write a single JSON text containing a feature "
"collection object (the default).")
use_rs_opt = click.option(
'--rs/--no-rs',
'use_rs',
default=False,
help="Use RS (0x1E) as a prefix for individual texts in a sequence "
"as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 "
"(default is False).")
# GeoJSON output mode option.
def geojson_type_collection_opt(default=False):
return click.option(
'--collection',
'geojson_type',
flag_value='collection',
default=default,
help="Output as GeoJSON feature collection(s).")
def geojson_type_feature_opt(default=False):
return click.option(
'--feature',
'geojson_type',
flag_value='feature',
default=default,
help="Output as GeoJSON feature(s).")
def geojson_type_bbox_opt(default=False):
return click.option(
'--bbox',
'geojson_type',
flag_value='bbox',
default=default,
help="Output as GeoJSON bounding box array(s).")
| 24.586957 | 78 | 0.660183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,640 | 0.483348 |
b404133dc455d3af035e0832fd933c69627e3b05 | 2,031 | py | Python | setup.py | ELC/testnbdev | 571400a9308ba91f05f6fabad5d3f79fd4417ab1 | [
"Apache-2.0"
]
| 1 | 2021-02-19T15:34:58.000Z | 2021-02-19T15:34:58.000Z | setup.py | ELC/testnbdev | 571400a9308ba91f05f6fabad5d3f79fd4417ab1 | [
"Apache-2.0"
]
| 2 | 2021-09-28T05:49:28.000Z | 2022-02-26T10:24:52.000Z | setup.py | ELC/nbdev_template | 571400a9308ba91f05f6fabad5d3f79fd4417ab1 | [
"Apache-2.0"
]
| null | null | null | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
config = config['DEFAULT']
config_keys = 'version description keywords author author_email'.split()
expected = config_keys + "lib_name user branch license status min_python audience language".split()
for setting in expected:
assert setting in config, f"missing expected setting: {setting}"
setup_config = {setting:config[setting] for setting in config_keys}
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9'.split()
requirements = config.get('requirements','').split()
lic = licenses[config['license']]
min_python = config['min_python']
setuptools.setup(
name = config['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(config['status'])],
'Intended Audience :: ' + config['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + config['language'].title(),
] + [f'Programming Language :: Python :: {version}' for version in py_versions[py_versions.index(min_python):]],
url = config['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
dependency_links = config.get('dep_links','').split(),
python_requires = '>=' + config['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': config.get('console_scripts','').split() },
**setup_config)
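# For reference only: a minimal settings.ini that satisfies the keys read above
# could look like the block below. All values are illustrative placeholders,
# not taken from any real project.
#
#   [DEFAULT]
#   lib_name = mylib
#   user = someuser
#   branch = master
#   version = 0.0.1
#   description = Example package
#   keywords = example
#   author = Jane Doe
#   author_email = jane@example.com
#   license = apache2
#   status = 2
#   min_python = 3.6
#   audience = Developers
#   language = English
#   requirements = requests
#   git_url = https://github.com/someuser/mylib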
| 39.823529 | 116 | 0.681438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.394879 |
b40507b05e0b887443fd6d70a1bf0020514bacc1 | 3,730 | py | Python | amaascore/tools/generate_party.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
]
| null | null | null | amaascore/tools/generate_party.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
]
| 8 | 2017-06-06T09:42:41.000Z | 2018-01-16T10:16:16.000Z | amaascore/tools/generate_party.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
]
| 8 | 2017-01-18T04:14:01.000Z | 2017-12-01T08:03:10.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from amaasutils.random_utils import random_string, random_decimal
import random
from amaascore.core.reference import Reference
from amaascore.parties.asset_manager import AssetManager
from amaascore.parties.broker import Broker
from amaascore.parties.children import Address, Email
from amaascore.parties.individual import Individual
from amaascore.parties.party import Party
def generate_common(asset_manager_id, party_id, party_status):
common = {'asset_manager_id': asset_manager_id or random.randint(1, 1000),
'party_id': party_id or str(random.randint(1, 1000)),
'party_status': party_status or 'Active',
'display_name': random_string(10),
'legal_name': random_string(10),
'url': random_string(10)
}
return common
def generate_party(asset_manager_id=None, party_id=None, party_status=None):
references = {'PartyDB': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
party = Party(**attributes)
# This is ok from a mutability perspective as the references collection doesn't trigger anything
party.references.update(references)
party.upsert_address('Registered', generate_address(address_primary=True))
party.upsert_email('Office', generate_email(email_primary=True))
return party
def generate_asset_manager(asset_manager_id=None, party_id=None, party_status=None):
references = {'LEI': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
asset_manager = AssetManager(**attributes)
asset_manager.references.update(references)
asset_manager.upsert_address('Registered', generate_address(address_primary=True))
asset_manager.upsert_email('Office', generate_email(email_primary=True))
return asset_manager
def generate_broker(asset_manager_id=None, party_id=None, party_status=None):
references = {'LEI': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
broker = Broker(**attributes)
broker.references.update(references)
broker.upsert_address('Registered', generate_address(address_primary=True))
broker.upsert_email('Office', generate_email(email_primary=True))
return broker
def generate_individual(asset_manager_id=None, party_id=None, party_status=None):
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
individual = Individual(given_names=random_string(10), surname=random_string(10), **attributes)
return individual
def generate_address(country_id=None, address_primary=False):
address = Address(line_one=random_string(20),
line_two=random.choice([None, random_string(10)]),
city=random_string(10),
region=random_string(10),
postal_code=random_string(6),
country_id=country_id or random_string(3), # Make this a real country code
address_primary=address_primary)
return address
def generate_email(email=None, email_primary=False):
return Email(email=email or (random_string(10) + '@amaas.com'), email_primary=email_primary)
def generate_parties(asset_manager_ids=[], number=5):
parties = []
for i in range(number):
party = generate_party(asset_manager_id=random.choice(asset_manager_ids))
parties.append(party)
return parties
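# Illustrative usage (not part of the original module); the asset manager ids
# and the count below are arbitrary placeholders.
def _example_usage():
    parties = generate_parties(asset_manager_ids=[1, 2], number=3)
    for party in parties:
        print(party)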
| 44.404762 | 113 | 0.746917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 299 | 0.080161 |
b405b1ef752a1702183bea0b47a0bc6616babde1 | 9,291 | py | Python | fitgrid/utils/lmer.py | vishalbelsare/fitgrid | 0197e7a3fc2c937da03d768b5c91220eebe54a22 | [
"BSD-3-Clause"
]
| 10 | 2020-02-01T22:58:32.000Z | 2022-03-29T11:31:00.000Z | fitgrid/utils/lmer.py | vishalbelsare/fitgrid | 0197e7a3fc2c937da03d768b5c91220eebe54a22 | [
"BSD-3-Clause"
]
| 161 | 2018-09-11T16:41:30.000Z | 2021-08-03T19:26:23.000Z | fitgrid/utils/lmer.py | vishalbelsare/fitgrid | 0197e7a3fc2c937da03d768b5c91220eebe54a22 | [
"BSD-3-Clause"
]
| 4 | 2019-02-27T08:11:31.000Z | 2021-07-21T20:50:36.000Z | # -*- coding: utf-8 -*-
"""User functions to streamline working with selected pymer4 LMER fit
attributes from lme4::lmer and lmerTest for ``fitgrid.lmer`` grids.
"""
import functools
import re
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import fitgrid
from fitgrid.fitgrid import LMERFitGrid
def get_lmer_dfbetas(epochs, factor, **kwargs):
r"""Fit lmers leaving out factor levels one by one, compute DBETAS.
Parameters
----------
epochs : Epochs
Epochs object
factor : str
column name of the factor of interest
**kwargs
keyword arguments to pass on to ``fitgrid.lmer``, like ``RHS``
Returns
-------
dfbetas : pandas.DataFrame
dataframe containing DFBETAS values
Examples
--------
Example calculation showing how to pass in model fitting parameters::
dfbetas = fitgrid.utils.lmer.get_lmer_dfbetas(
epochs=epochs,
factor='subject_id',
RHS='x + (x|a)
)
Notes
-----
DFBETAS is computed according to the following formula [NieGroPel2012]_:
.. math::
DFBETAS_{ij} = \frac{\hat{\gamma}_i - \hat{\gamma}_{i(-j)}}{se\left(\hat{\gamma}_{i(-j)}\right)}
for parameter :math:`i` and level :math:`j` of ``factor``.
"""
# get the factor levels
table = epochs.table.reset_index().set_index(
[epochs.epoch_id, epochs.time]
)
levels = table[factor].unique()
# produce epochs tables with each level left out
looo_epochs = (
fitgrid.epochs_from_dataframe(
table[table[factor] != level],
time=epochs.time,
epoch_id=epochs.epoch_id,
channels=epochs.channels,
)
for level in levels
)
# fit lmer on these epochs
fitter = functools.partial(fitgrid.lmer, **kwargs)
grids = map(fitter, looo_epochs)
coefs = (grid.coefs for grid in grids)
# get coefficient estimates and se from leave one out fits
looo_coefs = pd.concat(coefs, keys=levels, axis=1)
looo_estimates = looo_coefs.loc[pd.IndexSlice[:, :, 'Estimate'], :]
looo_se = looo_coefs.loc[pd.IndexSlice[:, :, 'SE'], :]
# get coefficient estimates from regular fit (all levels included)
all_levels_coefs = fitgrid.lmer(epochs, **kwargs).coefs
all_levels_estimates = all_levels_coefs.loc[
pd.IndexSlice[:, :, 'Estimate'], :
]
# drop outer level of index for convenience
for df in (looo_estimates, looo_se, all_levels_estimates):
df.index = df.index.droplevel(level=-1)
# (all_levels_estimate - level_excluded_estimate) / level_excluded_se
dfbetas = all_levels_estimates.sub(looo_estimates, level=1).div(
looo_se, level=1
)
return dfbetas.stack(level=0)
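# Follow-up sketch (not part of the original API): screen the DFBETAS returned
# above with the conventional |DFBETAS| > 2/sqrt(n) cutoff, n being the number
# of levels of `factor`. The helper name and the RHS value are placeholders.
def _flag_influential_levels(epochs, factor, rhs):
    dfbetas = get_lmer_dfbetas(epochs=epochs, factor=factor, RHS=rhs)
    n_levels = epochs.table[factor].nunique()
    cutoff = 2.0 / np.sqrt(n_levels)
    # entries that do not exceed the cutoff come back as NaN
    return dfbetas[dfbetas.abs() > cutoff]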
def get_lmer_warnings(lmer_grid):
"""grid the LMERFitGrid lme4::lmer4 warnings by type
lmer warnings are a mishmash of characters, punctuation, and digits, some with
numerical values specific to the message, for instance,
| Model failed to converge with max|grad| = 0.00222262 (tol = 0.002, component 1)
| unable to evaluate scaled gradient
| boundary (singular) fit: see ?isSingular
| np.nan
The warning strings are returned as-is except for stripping
leading and trailing whitespace and the "= N.NNNNNNNN" portion of the
max \|grad\| convergence failure.
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
Returns
-------
warning_grids : dict
A dictionary, the keys are lmer warning strings, each value
is a `pandas.DataFrame` indicator grid where grid.loc[time, channel] == 1 if the
lmer warning == key, otherwise 0.
"""
if not isinstance(lmer_grid, LMERFitGrid):
msg = (
"get_lmer_warnings() must be called on an "
f"LMERFitGrid not {type(lmer_grid)}"
)
raise ValueError(msg)
# In pymer4 0.7.1+ and lme4::lmer 0.22+ warnings come back from
# lme4::lmer via pymer4 as list of strings and each LMERFitgrid
# cell may have a list of 0, 1, 2, ... ? warnings. This means
# LMERFitGrid.warnings time index may have missing time stamps (= no
# warnings), a single time stamp (one warning), or duplicate time
# stamps (> 1 warning) and np.nan at channels where there is no
# warning at that timestamp.
# strip reported decimal values so max|grad| convergence failures are one kind
tidy_strings = lmer_grid.warnings.applymap(
lambda x: re.sub(
r"max\|grad\|\s+=\s+\d+\.\d+\s+", "max|grad| ", x
).strip()
if isinstance(x, str)
else x # no warning == np.nan
).rename_axis([lmer_grid.time, "wdx", "_empty"], axis=0)
# the number and types of warning generally vary by time and/or channel
warning_kinds = (
pd.Series(tidy_strings.to_numpy().flatten()).dropna().unique()
)
# collect messy gappy, multiple warnings as a dict of key==warning,
# value==tidy time x channel indicator grid (0, 1)
warning_grids = {}
assert lmer_grid._grid.shape == lmer_grid.has_warning.shape
for warning_kind in warning_kinds:
# empty grid w/ correct shape, row index and columns
warning_grid = pd.DataFrame(
np.zeros(lmer_grid._grid.shape, dtype=int),
index=lmer_grid._grid.index.copy(),
columns=lmer_grid._grid.columns.copy(),
)
# select rows w/ at least one non-na
warning_rows = tidy_strings[tidy_strings == warning_kind].dropna(
axis=0, how="all"
)
assert warning_rows.index.names[0] == lmer_grid._grid.index.name
assert all(
warning_rows.index.get_level_values(0)
== warning_rows.index.get_level_values(0).unique()
)
for rdx, row in warning_rows.iterrows():
warning_grid.loc[rdx[0], :] = (row == warning_kind).astype(int)
assert all(warning_grid.index == lmer_grid._grid.index)
assert all(warning_grid.columns == lmer_grid._grid.columns)
warning_grids[warning_kind] = warning_grid
return warning_grids
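# Usage sketch (illustrative): tally how many grid cells raised each kind of
# lmer warning. `lmer_grid` is assumed to be the LMERFitGrid returned by
# fitgrid.lmer(); the helper name is not part of the original API.
def _summarize_warnings(lmer_grid):
    return {
        kind: int(grid.to_numpy().sum())
        for kind, grid in get_lmer_warnings(lmer_grid).items()
    }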
def plot_lmer_warnings(lmer_grid, which="each", verbose=True):
"""Raster plot lme4::lmer warning grids
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
which : {"each", "all", or list of str}
select the types of warnings to plot. `each` (default) plots
each type of warning separately. `all` plots one grid showing
where any type of warning occurred. A list of strings searches
the lmer warnings and plots those that match.
verbose : bool, default=True
If `True` warn of failed matches for warnings keywords.
Examples
--------
default, plot each warning grid separately
>>> plot_lmer_warnings(lmer_grid)
one plot shows everywhere there is a warning
>>> plot_lmer_warnings(lmer_grid, which="all")
plot just warnings that match these strings
>>> plot_lmer_warnings(lmer_grid, which=["converge", "singular"])
"""
def _plot_warnings(warning, warning_grid):
# masked array non-values are transparent in pcolormesh
_, axi = plt.subplots(figsize=(12, len(warning_grid.columns) / 2))
axi.set_title(warning)
ylabels = warning_grid.columns
axi.yaxis.set_major_locator(
mpl.ticker.FixedLocator(np.arange(len(ylabels)))
)
axi.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(ylabels))
axi.pcolormesh(
warning_grid.index,
np.arange(len(ylabels)),
np.ma.masked_not_equal(warning_grid.T.to_numpy(), 1),
shading="nearest",
cmap=mpl.colors.ListedColormap(['red']),
)
# validate kwarg
if not (
isinstance(which, str)
or (
isinstance(which, list)
and all((isinstance(wrn, str) for wrn in which))
)
):
raise ValueError(
"The value for which=value must be 'any', 'each', a warning "
f"string pattern to match or list of them, not this: {which}"
)
warning_grids = get_lmer_warnings(lmer_grid)
warning_grids["all"] = lmer_grid.has_warning.astype(int)
keys = None
if which == "all":
keys = ["all"]
elif which == "each":
keys = list(warning_grids.keys())
else:
# lookup matching patterns var so as to not step on original kwarg
patterns = [which] if isinstance(which, str) else which
keys = []
for pattern in patterns:
matches = [key for key in warning_grids if pattern in key]
keys += matches # may be []
if verbose and not matches:
warnings.warn(f"warning pattern '{pattern}' not found")
assert isinstance(keys, list), f"this should be type list: {type(keys)}"
for key in keys:
if verbose:
print(f"{key}")
_plot_warnings(key, warning_grids[key])
if verbose and not keys:
warnings.warn(f"no model warnings match {which}")
| 32.486014 | 103 | 0.633839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,705 | 0.506404 |
b405ca5c19bd60bffd27ebed33907aa4cbf83da9 | 2,055 | py | Python | pyesasky/jupyter_server.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | [
"BSD-3-Clause"
]
| 13 | 2019-05-30T19:57:37.000Z | 2021-09-10T09:43:49.000Z | pyesasky/jupyter_server.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | [
"BSD-3-Clause"
]
| 21 | 2019-06-21T18:55:25.000Z | 2022-02-27T14:48:13.000Z | pyesasky/jupyter_server.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | [
"BSD-3-Clause"
]
| 8 | 2019-05-30T12:20:48.000Z | 2022-03-04T04:01:20.000Z | import os
import json
from hashlib import md5
from tornado import web
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
__all__ = ['load_jupyter_server_extension']
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'nbextension', 'static')
CONFIG = os.path.expanduser('~/.pyesasky')
class ESASkyFileHandler(IPythonHandler):
def get(self, filename):
filename = os.path.basename(filename)
# First we check if this is a standard file in the static directory
if os.path.exists(os.path.join(STATIC_DIR, filename)):
path = os.path.join(STATIC_DIR, filename)
else:
# If not, we open the config file which should contain a JSON
# dictionary with filenames and paths.
if not os.path.exists(CONFIG):
raise web.HTTPError(404)
with open(CONFIG) as f:
config = json.load(f)
if filename in config['paths']:
path = config['paths'][filename]
else:
raise web.HTTPError(404)
with open(path, 'rb') as f:
content = f.read()
self.finish(content)
def serve_file(path, extension=''):
if not os.path.exists(path):
raise ValueError("Path {0} does not exist".format(path))
hash = md5(path.encode('utf-8')).hexdigest() + extension
with open(CONFIG) as f:
config = json.load(f)
if hash not in config['paths']:
config['paths'][hash] = os.path.abspath(path)
with open(CONFIG, 'w') as f:
json.dump(config, f)
return '/esasky/' + hash
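# Illustrative usage (not part of the original module): once the server
# extension below is loaded, kernel-side code can expose a local file and pass
# the returned URL to the ESASky front end. The path is a placeholder.
#
#   url = serve_file('/tmp/my_catalogue.csv', extension='.csv')
#   # url == '/esasky/<md5-of-path>.csv', served by ESASkyFileHandler above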
def load_jupyter_server_extension(nb_server_app):
web_app = nb_server_app.web_app
host_pattern = '.*$'
if not os.path.exists(CONFIG):
config = {'paths': {}}
with open(CONFIG, 'w') as f:
json.dump(config, f)
route_pattern = url_path_join(web_app.settings['base_url'], '/esasky/(.*)')
web_app.add_handlers(host_pattern, [(route_pattern, ESASkyFileHandler)])
| 27.77027 | 79 | 0.620925 | 862 | 0.419465 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.16983 |
b4073a213da55b416141036502c3d25e2d22ed63 | 3,552 | py | Python | pingpongskill/pingpongskill.py | Garvys/PingPongSkill | 71749a34772326dd83121bb0ab6fad52b7d8d694 | [
"MIT"
]
| 1 | 2017-09-22T13:30:20.000Z | 2017-09-22T13:30:20.000Z | pingpongskill/pingpongskill.py | Garvys/PingPongSkill | 71749a34772326dd83121bb0ab6fad52b7d8d694 | [
"MIT"
]
| null | null | null | pingpongskill/pingpongskill.py | Garvys/PingPongSkill | 71749a34772326dd83121bb0ab6fad52b7d8d694 | [
"MIT"
]
| null | null | null | # -*-: coding utf-8 -*-
""" Skeleton Snips skill. """
import re
import json
import os
import datetime
from text2num import text2num
from collections import defaultdict
FORMAT = '%Y.%m.%dT%H:%M:%S'
class PingPongSkill(object):
""" Skeleton Snips skill. """
def __init__(self):
pass
def handle_loser(self):
db = JsonDB()
perfs = db.compute_perfs()
if len(perfs) == 0:
print "No match registred"
return
loser = sorted(perfs.iteritems(), key=lambda x: x[1])[0][0]
print "The one who lost the most matches is {}".format(loser)
def handle_winner(self):
db = JsonDB()
perfs = db.compute_perfs()
if len(perfs) == 0:
print "No match registred"
return
        winner = sorted(perfs.iteritems(), key=lambda x: -x[1])[0][0]
        print "The one who won the most matches is {}".format(winner)
def handle_terminate_game(self, winner, loser, score):
print "*** {} {} {}".format(winner, loser, score)
try:
score = parse_core(score)
except ValueError, err:
print err
db = JsonDB()
timestamp = datetime.datetime.now().strftime(FORMAT)
db.add(winner, loser, score[0], score[1], timestamp)
print "I added the match {} versus {}: score: {}".format(winner,
loser,
score)
regex = re.compile('([\w\s]+)to([\w\s]+)')
def parse_core(score):
match = regex.search(score)
if not match or len(match.groups()) != 2:
raise ValueError("{} is an incorrect score".format(score))
score_1 = text2num(match.groups()[0].strip())
score_2 = text2num(match.groups()[1].strip())
if score_1 != 11 and score_2 != 11:
raise ValueError(
"{} is an incorrect score: one of the player needs to have "
"11".format(
score))
return sorted([score_1, score_2], reverse=True)
class JsonDB(object):
path = 'ping_pong_db.json'
def __init__(self):
if not os.path.exists(self.path):
self._results = []
else:
with open(self.path, 'r') as f:
results = json.load(f)
self._results = results
def add(self, player_1, player_2, score_player_1, score_player_2,
datetime_str):
self._results += [
(datetime_str, player_1, player_2, score_player_1, score_player_2)]
self.save_results()
def save_results(self):
with open(self.path, 'w') as f:
json.dump(self._results, f)
def compute_perfs(self):
player_to_win = defaultdict(int)
player_to_lose = defaultdict(int)
for _, win, lose, _, _ in self._results:
player_to_win[win] += 1
player_to_lose[lose] += 1
player_to_proportion = {}
for player in set(player_to_win.keys() + player_to_lose.keys()):
proportion = float(player_to_win[player]) / (
player_to_win[player] + player_to_lose[player])
player_to_proportion[player] = proportion
return player_to_proportion
if __name__ == '__main__':
scores = [
'eleven to two',
'twenty to eleven'
]
for score in scores:
print parse_core(score)
PingPongSkill().handle_loser()
PingPongSkill().handle_terminate_game('thib', 'alex', 'eleven to two')
PingPongSkill().handle_loser()
| 30.62069 | 79 | 0.566441 | 2,486 | 0.699887 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.136824 |
b407548d1539781a310dd11a278698c4338d7000 | 13,006 | py | Python | xarray/backends/npy_io.py | martinResearch/xarray | e921d1bfa4785b10310f8b5d46a1efacba7e1cc9 | [
"Apache-2.0"
]
| null | null | null | xarray/backends/npy_io.py | martinResearch/xarray | e921d1bfa4785b10310f8b5d46a1efacba7e1cc9 | [
"Apache-2.0"
]
| null | null | null | xarray/backends/npy_io.py | martinResearch/xarray | e921d1bfa4785b10310f8b5d46a1efacba7e1cc9 | [
"Apache-2.0"
]
| null | null | null |
import numpy as np
import xarray as xr
import pandas as pd
import sys
import json
import os
import datetime
from xarray.core.utils import (
decode_numpy_dict_values,
either_dict_or_kwargs,
ensure_us_time_resolution,
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
from numpy.lib import format
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, datetime.datetime):
return obj.__str__()
if isinstance(obj, np.datetime64):
return obj.__str__()
return json.JSONEncoder.default(self, obj)
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def myJsonConverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def save_npys(file, data, compress=False,min_dims_coord_npy = 2):
if isinstance(data,xr.DataArray):
_save_dataarray(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
elif isinstance(data,xr.Dataset):
_save_dataset(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
else:
        raise BaseException('Unexpected type %s' % str(type(data)))
class zip_file():
def __init__(self,file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
self.file_dir = file_dir
self.file_prefix = file_prefix
self.zipf = zipfile.ZipFile(file, *args, **kwargs)
def close(self):
self.zipf.close()
def open(self,x):
return self.zipf.open(x)
def read(self,x):
return self.zipf.read(x)
def namelist(self):
return self.zipf.namelist()
def add_bin_data(self,fname,data_bytes):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
fid.write(data_bytes)
else:
import tempfile
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
fid.write(data_bytes)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def add_npy(self,fname,val):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
else:
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def _save_dataarray(file, dataarray, compress=False, min_dims_coord_npy =2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.xar'):
file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
if dataarray.name is None:
data_name = 'data'
else:
data_name = dataarray.name
zipf.add_npy(data_name+'.npy',dataarray.values)
d = dataarray.variable.to_dict(data=False)
d['version'] = xr.__version__
d.update({"coords": {}, "name": dataarray.name})
for k in dataarray.coords:
assert(k!=data_name)
coord_var = dataarray.coords[k].variable
item = {"attrs": decode_numpy_dict_values(coord_var.attrs), "dtype":str(coord_var.values.dtype)}# we save the type here
if (coord_var.dims!=()) and( len(coord_var.dims)>1 or coord_var.dims[0]!=k): # we don't keep the dims if we have a dimension_coordinate or if dims is empty to keep the json more concise (see http://xarray.pydata.org/en/stable/data-structures.html#coordinates)
item['dims'] = coord_var.dims
if (coord_var.dims!=()) and len(coord_var.dims)>=min_dims_coord_npy:
zipf.add_npy(k+'.npy',coord_var.values)
else:
item["data"] = ensure_us_time_resolution(coord_var.values) # keeping coordinates data in the json
d["coords"][k] = item
json_str = json.dumps(d,cls=NumpyEncoder) + "\n" # 2. string (i.e. JSON)
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('DataArray.json',json_bytes)
zipf.close()
def _save_dataset(file, dataset, compress=False, min_dims_coord_npy = 2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.xar'):
file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
dataset_dict = dataset.to_dict(data = False)
dataset_dict['version'] = xr.__version__
for key, array in dict(dataset.data_vars).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['data_vars'][key]['data']=ensure_us_time_resolution(val)
for key, array in dict(dataset.coords).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['coords'][key]['data']=ensure_us_time_resolution(val)
json_str = json.dumps(dataset_dict,cls=NumpyEncoder) + "\n"
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('Dataset.json', json_bytes)
zipf.close()
def load_npys(file):
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
if True:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
_zip = zip_file(fid)
files = _zip.namelist()
_data_dict={}
_type = None
for x in files:
if x.endswith('.npy'):
bytes = _zip.open(x)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
assert( magic == format.MAGIC_PREFIX)
bytes = _zip.open(x)
_data_dict[x[:-4]] = format.read_array(bytes, allow_pickle=False, pickle_kwargs=None)
elif x=='Dataset.json':
assert(_type is None)
_type = xr.Dataset
header = json.loads(_zip.read(x))
elif x=='DataArray.json':
assert(_type is None)
_type = xr.DataArray
header = json.loads(_zip.read(x))
if _type is None:
raise IOError("Failed to read file")
if _type == xr.DataArray:
if 'name' in header and (header['name'] is not None):
data_name = header['name']
else:
data_name = 'data'
data = _data_dict[data_name]
assert (data.dtype==header['dtype'])
assert (data.shape==tuple(header['shape']))
coords={}
for k,coord in header['coords'].items():
if 'data' in coord:
coord_data = np.array(coord['data'],dtype=coord['dtype'])
else:
coord_data = _data_dict[k]
if 'dims' in coord:
dims=coord['dims']
elif coord_data.ndim==0:
dims=()
else:
dims= [k]
coords[k]=xr.DataArray(coord_data,dims=dims)
return xr.DataArray(data, coords = coords, dims=header['dims'],attrs=header['attrs'],name=header['name'])
else: # type is Dataset
coords={}
data_vars={}
for k,d in header['coords'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
coords[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
for k,d in header['data_vars'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
data_vars[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
return xr.Dataset(data_vars, coords=coords,attrs=header['attrs'])
else:
raise IOError(
"Failed to interpret file %s as a zip" % repr(file))
return None
def test():
from xarray.testing import assert_identical
data = np.random.rand(4, 3)
locs = ['IA', 'IL', 'IN']
times = pd.date_range('2000-01-01', periods=4)
foo = xr.DataArray(data, coords=[times, locs], dims=['time', 'space'])
v=foo.coords['time'].variable
save_npys('foo',foo)
foo_loaded = load_npys('foo.xar')
assert_identical(foo,foo_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
da = xr.DataArray(temp,name='precipitations',dims=['x','y','time'],
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('da',da)
da_loaded=load_npys('da.xar')
assert_identical(da,da_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
precip = 10 * np.random.rand(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
ds = xr.Dataset({'temperature' : (['x', 'y', 'time'], temp),
'precipitation': (['x', 'y', 'time'], precip)},
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('ds',ds,min_dims_coord_npy=1)
ds_loaded= load_npys('ds.xar')
assert_identical(ds, ds_loaded)
if __name__ == "__main__":
test()
| 39.531915 | 267 | 0.541827 | 3,381 | 0.259957 | 0 | 0 | 0 | 0 | 0 | 0 | 2,030 | 0.156082 |
b408eeeaec183c35458c8ea0619e1ec8dfb285b7 | 14,222 | py | Python | applications/popart/bert/bert_data/squad_dataset.py | Alwaysproblem/examples-1 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | [
"MIT"
]
| null | null | null | applications/popart/bert/bert_data/squad_dataset.py | Alwaysproblem/examples-1 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | [
"MIT"
]
| null | null | null | applications/popart/bert/bert_data/squad_dataset.py | Alwaysproblem/examples-1 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | [
"MIT"
]
| null | null | null | # Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import random
import pickle
import json
import fractions
import math
import subprocess
from logging import getLogger
from functools import reduce
from .dataset import DataSet
from .data_sampler import SequentialSampler, ShuffledSampler, DistributedDataSampler
from .tokenization import FullTokenizer
from .squad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, InputFeatures
logger = getLogger(__name__)
def generate_random_features(sequence_length, vocab_length, batch_size):
features = []
for i in range(batch_size):
features.append(InputFeatures(
i,
None,
None,
None,
None,
None,
np.random.randint(0, vocab_length, size=sequence_length),
None,
np.random.randint(0, 2, size=sequence_length),
0,
None,
None,
np.random.randint(0, sequence_length, size=1),
np.random.randint(0, sequence_length, size=1),
None,
np.random.randint(0, sequence_length+1, size=1)
))
return features
class SquadDataLoader(object):
def __init__(self,
features,
sequence_length=None,
batch_size=1,
dtype=np.int32,
sampler=None):
self.features = features
self.batch_size = batch_size
self.dtype = dtype
self.sequence_length = sequence_length
self.sampler = sampler
if sampler is None:
self.sampler = SequentialSampler(features)
self.num_batches = len(self.sampler)//self.batch_size
def __len__(self):
return self.num_batches
def __iter__(self):
self.feature_iterator = iter([self.features[idx] for idx in self.sampler])
return self
def __next__(self):
items = [next(self.feature_iterator) for _ in range(self.batch_size)]
indicies = []
positions = []
segments = []
sequence_mask_idx = []
start_pos = []
end_pos = []
uid = []
for item in items:
indicies.append(item.input_ids)
padding_max = self.sequence_length if self.sequence_length is not None else len(item.input_ids)
padding_length = len(item.input_ids) - item.padding_start_index
position_padding = np.full(padding_length, padding_max)
position_ids = np.arange(0, item.padding_start_index)
positions.append(np.concatenate((position_ids, position_padding)).astype(np.int32))
segments.append(item.segment_ids)
sequence_mask_idx.append(item.padding_start_index)
start_pos.append(item.start_position)
end_pos.append(item.end_position)
uid.append(item.unique_id)
# Including impossible samples during training is under investigation. T12851
# if item.is_impossible:
# logger.warning("Impossible sample exists in the dataset. "
# f"start pos: {item.start_position}, end pos: {item.end_position}")
inputs = []
for i in [indicies, positions, segments, sequence_mask_idx, start_pos, end_pos, uid]:
inputs.append(np.stack(i))
return inputs
class BertDataTransform(object):
'''
Masks the indices that are larger than the vocab_length
'''
def __init__(self, dataloader, vocab_length, sequence_length, embedding_dict, positional_dict, merge_both_embeddings, is_training=True):
self.dataloader = dataloader
self.vocab_length = vocab_length
self.sequence_length = sequence_length
self.is_training = is_training
self.embedding_dict = embedding_dict
self.positional_dict = positional_dict
self.merge_both_embeddings = merge_both_embeddings
def __len__(self):
return len(self.dataloader)
def __iter__(self):
self.dataloader_iterator = iter(self.dataloader)
return self
def __next__(self):
items = next(self.dataloader_iterator)
# Specific BERT Post Processing. TODO: Find a better place for this processing
# The vocab_length may be smaller than the original vocab... In this case with the custom_op
# Out of Bounds indicies over a certain threshold will cause numerical issues.
# 100 is unknown token [UNK]
# 0 in the label is padding
OOB = items[0] >= self.vocab_length
items[0][OOB] = 100
# Force use of uint32 for all inputs.
for i in range(len(items)):
if self.is_training or i < 4:
items[i] = items[i].astype(np.uint32)
if self.embedding_dict is not None:
items[0] = np.take(self.embedding_dict, items[0], 0)
if self.positional_dict is not None:
positional_expanded = np.take(self.positional_dict, items[1], 0)
if self.merge_both_embeddings:
items[0] += positional_expanded
else:
items[1] = positional_expanded
return items
def load_or_cache_features(input_file,
vocab_file,
sequence_length,
is_training=True,
cache_file=None,
overwrite_cache=False,
do_lower_case=False):
if cache_file is None:
cache_file = input_file + f".{sequence_length}.cache"
if os.path.exists(cache_file) and not overwrite_cache:
examples = None
logger.info(f"Loading Cache {cache_file}")
with open(cache_file, "rb") as f:
features = pickle.load(f)
else:
logger.info("Reading Examples")
examples = read_squad_examples(input_file=input_file,
is_training=is_training,
version_2_with_negative=False)
# google-research/bert uses sequence_length 384 with doc_stride 128
# TODO: Find a good value for the doc_stride with sequence_length <384
doc_stride = 128
if sequence_length < 384:
doc_stride = 64
logger.info("Converting to Features")
features = convert_examples_to_features(examples=examples,
tokenizer=FullTokenizer(vocab_file, do_lower_case=do_lower_case),
max_seq_length=sequence_length,
doc_stride=doc_stride,
max_query_length=64,
is_training=is_training)
logger.info(f"Saving Cache {cache_file}")
with open(cache_file, "wb") as f:
pickle.dump(features, f)
return features, examples
class SquadDataSet(DataSet):
def __init__(self,
features,
examples,
input_file,
is_training,
output_dir=None,
evaluate_script=None,
do_lower_case=False,
n_extra=0,
**kwargs):
super().__init__(**kwargs)
self.features = features
self.examples = examples
self.is_training = is_training
self.input_file = input_file
self.output_dir = output_dir
self.do_lower_case = do_lower_case
if not self.is_training and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
# If examples is None, features was loaded from the cache
# So the examples need to be recreated.
if self.examples is None:
self.examples = read_squad_examples(input_file=self.input_file,
is_training=self.is_training,
version_2_with_negative=False)
self.results = []
self.evaluate_script = evaluate_script
self.n_extra = n_extra
def add_results(self, data, logits):
# Results will be batched. Flatten to individual results
start_logits, end_logits = [
logit.reshape(-1, logit.shape[-1]).tolist()
for logit in logits]
for i, unique_id in enumerate(data["uid"]):
self.results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits[i],
end_logits=end_logits[i]
))
def write_predictions(self, epoch=None):
if self.is_training:
raise RuntimeError("Predictions cannot be written for training datasets")
if self.output_dir is None:
raise RuntimeError("Predictions cannot be written when output_dir is None")
suffix = f"_{epoch}" if epoch is not None else ""
predictions_file = os.path.join(self.output_dir, f"predictions{suffix}.json")
nbest_file = os.path.join(self.output_dir, f"nbest_predictions{suffix}.json")
null_log_odds_file = os.path.join(self.output_dir, f"null_odds{suffix}.json")
self.results = self.results[:len(self.results) - self.n_extra]
write_predictions(self.examples,
self.features,
self.results,
20, 30,
self.do_lower_case,
predictions_file,
nbest_file,
null_log_odds_file,
True,
False, 0)
if self.evaluate_script is not None:
evaluation = subprocess.check_output(["python", self.evaluate_script, self.input_file, predictions_file])
evaluation = json.loads(evaluation)
f1 = evaluation["f1"]
exact_match = evaluation["exact_match"]
status_string = f"F1 Score: {f1} | Exact Match: {exact_match}"
if epoch is not None:
status_string = f"Epoch: {epoch:3}{args.epochs - 1} | " + status_string
logger.info(status_string)
def get_bert_dataset(tensor_shapes,
input_file,
output_dir,
sequence_length,
vocab_file,
vocab_length,
batch_size,
batches_per_step,
embedding_dict,
positional_dict,
merge_both_embeddings=False,
replication_factor=1,
accumulation_factor=1,
shuffle=True,
is_training=True,
overwrite_cache=False,
no_drop_remainder=False,
evaluate_script=None,
generated_data=False,
do_lower_case=False,
max_pipeline_stage=1,
seed=0,
mpi_size=1,
mpi_rank=0,
is_distributed=False):
samples_per_step = batch_size * batches_per_step * \
replication_factor * accumulation_factor
div_factor = batch_size * replication_factor * accumulation_factor * batches_per_step
pad = 0
if generated_data:
features = generate_random_features(
sequence_length, vocab_length, samples_per_step)
examples = None
output_dir = None
logger.info("Generating random dataset")
else:
features, examples = load_or_cache_features(
input_file,
vocab_file,
sequence_length,
is_training,
overwrite_cache=overwrite_cache,
do_lower_case=do_lower_case)
if no_drop_remainder and not generated_data:
# dataset will be padded to be divisible by batch-size and samples-per-step
pad = int(np.ceil(len(features)/div_factor)) * div_factor - len(features)
if is_distributed:
sampler = DistributedDataSampler(
features, seed, shuffle,
mpi_size, mpi_rank, padding=False, padding_sub=pad, div_factor=div_factor)
pad = sampler.get_subpadding_size()
elif shuffle:
sampler = ShuffledSampler(features, seed, pad)
else:
sampler = SequentialSampler(features, pad)
if no_drop_remainder and not generated_data:
logger.info(f"no_drop_remainder: Dataset padded by {pad} samples")
dl = SquadDataLoader(
features,
sequence_length=sequence_length,
batch_size=samples_per_step,
sampler=sampler
)
bert_ds = BertDataTransform(
dl,
vocab_length,
sequence_length,
embedding_dict,
positional_dict,
merge_both_embeddings,
is_training=is_training)
if not is_training:
# Add uid to the data dictionary so evaluation script can be run
tensor_shapes += [
("start", None),
("end", None),
("uid", None)]
ds = SquadDataSet(
features,
examples,
input_file,
is_training,
output_dir,
evaluate_script,
do_lower_case=do_lower_case,
n_extra=pad,
loader=bert_ds,
tensor_shapes=tensor_shapes,
batches_per_step=batches_per_step,
replication_factor=replication_factor,
accumulation_factor=accumulation_factor)
return ds
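# Illustrative call (not from the original repo; every value below is a
# placeholder). `tensor_shapes` is a list of (tensor_name, shape) tuples that
# normally comes from the model's input tensors.
#
#   dataset = get_bert_dataset(
#       tensor_shapes=[("indices", (1, 384)), ("positions", (1, 384)),
#                      ("segments", (1, 384)), ("masks", (1, 1))],
#       input_file="data/squad/dev-v1.1.json",
#       output_dir="squad_results",
#       sequence_length=384,
#       vocab_file="vocab.txt",
#       vocab_length=30400,
#       batch_size=4,
#       batches_per_step=64,
#       embedding_dict=None,
#       positional_dict=None,
#       is_training=False)
#   for data in dataset:
#       ...  # feed the batch to the session, then dataset.add_results(...)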
| 36.84456 | 141 | 0.592181 | 7,293 | 0.512797 | 0 | 0 | 0 | 0 | 0 | 0 | 2,230 | 0.156799 |
b40913984e0d9a08276edd74c8a43fc4a6017a70 | 9,921 | py | Python | utils.py | sWizad/HashNeRF-pytorch | e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910 | [
"MIT"
]
| null | null | null | utils.py | sWizad/HashNeRF-pytorch | e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910 | [
"MIT"
]
| null | null | null | utils.py | sWizad/HashNeRF-pytorch | e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910 | [
"MIT"
]
| null | null | null | import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
device='cuda')
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
'''
coords: 3D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
#return ((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
def hash2d(coords, log2_hashmap_size):
'''
coords: 2D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y = coords[..., 0], coords[..., 1]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663)
def xy2index(xy,resolution):
return xy[...,0]+xy[...,1]*resolution
def get_bbox3d_for_blenderobj(camera_transforms, H, W, near=2.0, far=6.0):
camera_angle_x = float(camera_transforms['camera_angle_x'])
focal = 0.5*W/np.tan(0.5 * camera_angle_x)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
for frame in camera_transforms["frames"]:
c2w = torch.FloatTensor(frame["transform_matrix"])
rays_o, rays_d = get_rays(directions, c2w)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([1.0,1.0,1.0]), torch.tensor(max_bound)+torch.tensor([1.0,1.0,1.0]))
def get_bbox3d_for_llff(poses, hwf, near=0.0, far=1.0):
H, W, focal = hwf
H, W = int(H), int(W)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
poses = torch.FloatTensor(poses)
for pose in poses:
rays_o, rays_d = get_rays(directions, pose)
rays_o, rays_d = get_ndc_rays(H, W, focal, 1.0, rays_o, rays_d)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([0.1,0.1,0.0001]), torch.tensor(max_bound)+torch.tensor([0.1,0.1,0.0001]))
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
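# Sketch (not part of the original file): the typical consumer of
# get_voxel_vertices() looks up the 8 hashed corner embeddings and trilinearly
# interpolates them. `embeddings` is assumed to be an
# nn.Embedding(2**log2_hashmap_size, n_features); the corner ordering matches
# BOX_OFFSETS (000,001,010,011,100,101,110,111).
def trilinear_interp_sketch(xyz, bounding_box, resolution, log2_hashmap_size, embeddings):
    voxel_min, voxel_max, hashed_idx = get_voxel_vertices(
        xyz, bounding_box, resolution, log2_hashmap_size)
    voxel_embedds = embeddings(hashed_idx)  # B x 8 x F
    w = (xyz - voxel_min) / (voxel_max - voxel_min)  # B x 3, weights in [0, 1]
    wx, wy, wz = w[:, 0:1], w[:, 1:2], w[:, 2:3]
    # interpolate along x ...
    c00 = voxel_embedds[:, 0] * (1 - wx) + voxel_embedds[:, 4] * wx
    c01 = voxel_embedds[:, 1] * (1 - wx) + voxel_embedds[:, 5] * wx
    c10 = voxel_embedds[:, 2] * (1 - wx) + voxel_embedds[:, 6] * wx
    c11 = voxel_embedds[:, 3] * (1 - wx) + voxel_embedds[:, 7] * wx
    # ... then along y ...
    c0 = c00 * (1 - wy) + c10 * wy
    c1 = c01 * (1 - wy) + c11 * wy
    # ... and finally along z
    return c0 * (1 - wz) + c1 * wz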
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
#pdb.set_trace()
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
#hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xy = xy2index(voxel_indices_xy,resolution) #(B, 4)
hashed_voxel_indices_xz = xy2index(voxel_indices_xz,resolution) #(B, 4)
hashed_voxel_indices_yz = xy2index(voxel_indices_yz,resolution) #(B, 4)
#print(hashed_voxel_indices_yz.shape)
#pdb.set_trace()
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
camera_transforms = json.load(f)
bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
| 40.493878 | 123 | 0.627961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,974 | 0.299768 |
b4091bea05e2b9f2e78f9f40870c9ac7e8a9cac3 | 15,755 | py | Python | eval.py | dawnchen123/VS-Net | 21aa8873e32351716302934887f6a08e7d568ea2 | [
"Apache-2.0"
]
| 55 | 2021-04-17T08:15:06.000Z | 2022-03-30T02:38:27.000Z | eval.py | dawnchen123/VS-Net | 21aa8873e32351716302934887f6a08e7d568ea2 | [
"Apache-2.0"
]
| 3 | 2021-05-30T03:29:01.000Z | 2022-03-03T00:47:33.000Z | eval.py | dawnchen123/VS-Net | 21aa8873e32351716302934887f6a08e7d568ea2 | [
"Apache-2.0"
]
| 11 | 2021-07-01T15:15:23.000Z | 2022-02-12T06:47:26.000Z | import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
class Trainer(object):
def __init__(self, cfg):
self.cfg = cfg
# Define Saver
self.saver = Saver(cfg)
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.cfg["log_tb_dir"])
self.summary.create_summary()
# Define Dataloader
kwargs = {"num_workers": cfg["num_workers"], "pin_memory": True}
self.train_loader, self.val_loader, self.test_loader, dset = make_data_loader(
cfg, **kwargs)
# read landmark centers
self.id2center = np.array(json.load(
open(osp.join(cfg["data_dir"], "id2centers.json")))).astype(np.float64)
self.coding_book = torch.zeros(
(len(self.id2center), cfg["seg_channel"]), dtype=torch.float32).cuda()
torch.nn.init.xavier_uniform(self.coding_book)
print("coding book size = {}".format(self.coding_book.shape))
# generate color map
unique_label = np.arange(len(self.id2center))
unique_label = unique_label.astype(
np.int64) * 6364136223846793005 + 1442695040888963407
color_map = np.zeros((unique_label.shape[0], 3), np.uint8)
color_map[:, 0] = np.bitwise_and(unique_label, 0xff)
color_map[:, 1] = np.bitwise_and(np.right_shift(unique_label, 4), 0xff)
color_map[:, 2] = np.bitwise_and(np.right_shift(unique_label, 8), 0xff)
self.color_map = np.array(color_map)
self.coding_book = Variable(self.coding_book, requires_grad=True)
# Define network
model = VSNet(backbone=cfg["backbone"],
seg_decoder=cfg["seg_decoder"],
vertex_decoder=cfg["vertex_decoder"],
seg_channel=cfg["seg_channel"],
vertex_channel=cfg["vertex_channel"],
output_stride=cfg["out_stride"],
sync_bn=cfg["sync_bn"])
train_params = [{"params": model.get_1x_lr_params(), "lr": cfg["lr"]},
{"params": model.get_10x_lr_params(),
"lr": cfg["lr"] * 10},
{"params": self.coding_book, "lr": cfg["lr"] * 10}
]
# Define Optimizer
if cfg["optimizer"] == "SGD":
optimizer = torch.optim.SGD(train_params, momentum=cfg["momentum"],
weight_decay=cfg["weight_decay"], nesterov=cfg["nesterov"])
elif cfg["optimizer"] == "Adam":
optimizer = torch.optim.Adam(train_params, lr=cfg["lr"],
weight_decay=cfg["weight_decay"], amsgrad=True)
else:
raise NotImplementedError
# Define Criterion
self.seg_criterion = loss_dict[cfg["seg_loss_type"]]
self.vertex_criterion = loss_dict[cfg["vertex_loss_type"]]
self.model, self.optimizer = model, optimizer
# Define Evaluator
self.evaluator = Evaluator(
self.coding_book.shape[0], cfg["vertex_channel"])
# Define lr scheduler
self.scheduler = LR_Scheduler(mode=cfg["lr_scheduler"], base_lr=cfg["lr"],
num_epochs=cfg["epochs"], iters_per_epoch=len(
self.train_loader),
lr_step=cfg["lr_step"])
self.model = torch.nn.DataParallel(self.model)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
        self.best_pred = {"mIoU": 0.0, "Acc": 0.0, "Acc_class": 0.0,
                          "FWIoU": 0.0, "translation_median": 1000}
if cfg["resume"] is not None and cfg["resume"] == True:
print(os.path.isfile(cfg["resume_checkpoint"]))
if not os.path.isfile(cfg["resume_checkpoint"]):
raise RuntimeError("=> no checkpoint found at {}" .format(
cfg["resume_checkpoint"]))
checkpoint = torch.load(cfg["resume_checkpoint"])
cfg.opt["start_epoch"] = checkpoint["epoch"] - 1
self.model.module.load_state_dict(checkpoint["state_dict"])
if not cfg["ft"]:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.best_pred = checkpoint["best_pred"]
if "coding_book" in checkpoint.keys():
assert self.coding_book.shape == checkpoint["coding_book"].shape
self.coding_book = checkpoint["coding_book"]
else:
print("Alert! coding book does not exist in the checkpoint")
print("=> loaded checkpoint {} (epoch {})"
.format(cfg["resume"], checkpoint["epoch"]))
def validation(self, epoch):
print("=================================")
print("validation")
print("=================================")
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc="\r")
num_iter_val = len(self.val_loader)
test_loss = 0.0
num_images = 0
ten_count = []
five_count = []
three_count = []
one_count = []
translation_list = []
angular_list = []
reproject_list = []
test_seg_loss = 0.0
test_ver_loss = 0.0
for i, data in enumerate(tbar):
image, seg_target, vertex_target = [d.cuda() for d in data[:3]]
valid_mask = data[-1].cuda()
pose_target, camera_k_matrix, ori_img = data[3:]
seg_target = seg_target.long()
valid_mask = (seg_target.detach() > 0).float()
with torch.no_grad():
seg_pred, vertex_pred, seg_pred_x4s = self.model(
image)
loss_seg = 0
if self.cfg["seg_decoder"]:
loss_seg = self.seg_criterion(seg_pred, seg_target, self.coding_book,
margin=self.cfg["seg_loss_margin"],
seg_k=self.cfg["seg_k"],
valid_mask=valid_mask)
test_seg_loss += loss_seg.item()
self.summary.add_scalar(
"val/loss_seg_iter", loss_seg.item(), i + num_iter_val * epoch)
loss_vertex = 0
if self.cfg["vertex_decoder"]:
loss_vertex = self.vertex_criterion(vertex_pred, vertex_target,
valid_mask)
test_ver_loss += loss_vertex.item()
self.summary.add_scalar(
"val/loss_vertex_iter", loss_vertex.item(), i + num_iter_val * epoch)
loss = 0
if self.cfg["seg_decoder"]:
loss += loss_seg
if self.cfg["vertex_decoder"]:
loss += loss_vertex * self.cfg["vertex_loss_ratio"]
test_loss += loss.item()
tbar.set_description("Test loss: %.9f" % (test_loss / (i + 1)))
self.summary.add_scalar(
"val/total_loss_iter", loss.item(), i + num_iter_val * epoch)
global_step = i * \
self.cfg["val_batch_size"] + image.data.shape[0]
# evaluate seg_pred
seg_target = seg_target.detach().squeeze()
if self.cfg["seg_decoder"]:
seg_pred, knn = utils.evaluate_segmentation(seg_pred_x4s,
self.coding_book, seg_target.size(), self.cfg["use_own_nn"])
else:
seg_pred = seg_target
# evaluate vertex
pt3d_filter, pt2d_filter, _ = utils.evaluate_vertex_v2(vertex_pred, seg_pred,
self.id2center, inlier_thresh=0.999,
min_mask_num=self.cfg["val_label_filter_threshsold"])
# pt3d_filter, pt2d_filter = utils.evaluate_vertex(vertex_target, seg_pred, self.id2center)
camera_k_matrix = camera_k_matrix.squeeze().numpy()
translation_distance, angular_distance, error = 1e9, 1e9, 1e9
if pt2d_filter.shape[0] > 6:
# pnp
ret, pose_pred = utils.pnp(
pt3d_filter, pt2d_filter, camera_k_matrix)
error = utils.reproject_error(
pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)
translation_distance, angular_distance = utils.cm_degree_metric(
pose_pred, pose_target)
print(translation_distance, angular_distance, error, i)
ten_count.append(translation_distance <
10 and angular_distance < 10)
five_count.append(translation_distance <
5 and angular_distance < 5)
three_count.append(translation_distance <
3 and angular_distance < 3)
one_count.append(translation_distance <
1 and angular_distance < 1)
translation_list.append(translation_distance)
angular_list.append(angular_distance)
reproject_list.append(error)
# Add batch sample into evaluator
if self.cfg["seg_decoder"]:
self.evaluator.add_seg_batch(seg_target, seg_pred)
if self.cfg["visualize_segmenation"]:
self.summary.visualize_seg_image(ori_img, seg_pred, seg_target,
epoch, i, global_step, self.color_map)
if self.cfg["vertex_decoder"]:
# evaluate vertex_pred
vertex_target, vertex_pred = vertex_target.squeeze(), vertex_pred.squeeze()
self.evaluator.add_vertex_batch(vertex_target, vertex_pred)
                # compute vertex accuracy
if self.cfg["visualize_voting"]:
if self.cfg["visualize_landmark"] != None and self.cfg["visualize_landmark"]:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step, pt2d_filter, True)
else:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step)
mIoU, Acc, Acc_class, FWIoU = self.summary.visualize_seg_evaluator(
self.evaluator, epoch, "val/seg/")
print("Validation:")
print("[Epoch: %d, numImages: %5d]" % (epoch, num_images))
print("Loss: %.9f" % (test_loss / num_iter_val))
self.summary.add_scalar("val/total_loss_epoch",
test_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_seg_epoch",
test_seg_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_ver_epoch",
test_ver_loss / num_iter_val, epoch)
self.summary.add_scalar("val/pnp/10cm_epoch",
np.mean(ten_count), epoch)
self.summary.add_scalar("val/pnp/5cm_epoch",
np.mean(five_count), epoch)
self.summary.add_scalar("val/pnp/3cm_epoch",
np.mean(three_count), epoch)
self.summary.add_scalar("val/pnp/1cm_epoch", np.mean(one_count), epoch)
self.summary.add_scalar(
"val/pnp/translation_median_epoch", np.median(translation_list), epoch)
self.summary.add_scalar(
"val/pnp/angular_median_epoch", np.median(angular_list), epoch)
new_pred = {"mIoU": mIoU.item(), "Acc": Acc.item(), "Acc_class": Acc_class.item(), "FWIoU": FWIoU.item(),
"10cm": np.mean(ten_count),
"5cm": np.mean(five_count), "3cm": np.mean(three_count), "1cm": np.mean(one_count),
"translation_median": np.median(translation_list), "angular_list": np.median(angular_list)}
print(new_pred)
if new_pred["translation_median"] < self.best_pred["translation_median"]:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
"epoch": epoch + 1,
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"best_pred": self.best_pred,
"coding_book": self.coding_book
}, is_best, save_model=self.cfg["save_model"])
def main():
parser = argparse.ArgumentParser(
description="PyTorch Landmark Segmentation Training")
parser.add_argument("--dataset", type=str,
choices=["7scenes_loc", "cambridge_loc"], help="experiment config file")
parser.add_argument("--scene", type=str, default="",
help="experiment scene")
parser.add_argument("--gpu-id", type=str, default="",
help="experiment gpu id")
parser.add_argument("--use-aug", type=str, default="",
choices=["", "true", "false"], help="experiment use aug")
parser.add_argument("--config", type=str, default=None,
help="experiment config file")
parser.add_argument("--debug", type=str, default="",
choices=["", "true", "false"], help="debug")
parser.add_argument("--resume", type=str, default="true",
choices=["", "true", "false"], help="resume")
args = parser.parse_args()
debug = None
if args.debug != "":
debug = (args.debug == "true")
if args.dataset == "7scenes_loc":
cfg = SevenScenesLocConfig(args.config, debug)
elif args.dataset == "cambridge_loc":
cfg = CambridgeLocConfig(args.config, debug)
if args.scene != "":
cfg.opt["scene"] = args.scene
if args.gpu_id != "":
cfg.opt["devices"] = args.gpu_id
if args.use_aug == "true":
cfg.opt["use_aug"] = True
if args.resume == "true":
cfg.opt["resume"] = True
cfg.opt["resume_checkpoint"] = cfg["export_dir"] + \
'/ckpts/checkpoint-backup.pth.tar'
cfg.print_opt()
cfg.set_environmental_variables()
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed(cfg["seed"])
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
trainer = Trainer(cfg)
print("Starting Epoch:", trainer.cfg["start_epoch"])
print("Total Epoches:", trainer.cfg["epochs"])
trainer.validation(trainer.cfg["start_epoch"])
trainer.summary.close()
if __name__ == "__main__":
main()
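# Hedged usage note (added for illustration): a typical invocation of this evaluation
# script; the scene name and GPU id below are placeholder values.
#   python eval.py --dataset 7scenes_loc --scene chess --gpu-id 0 --resume true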
| 45.403458 | 124 | 0.548588 | 13,034 | 0.826978 | 0 | 0 | 0 | 0 | 0 | 0 | 2,690 | 0.170674 |
b40a24b1b84590432a339ee0e8fac4f84e897ac1 | 2,692 | py | Python | data/__init__.py | Joaomlg/multilayer-perceptron-mnist | 0454c4970c3a06a37ac7c20787a1bdf1cda7da0f | [
"MIT"
]
| 13 | 2021-05-15T04:22:04.000Z | 2022-03-29T10:55:32.000Z | data/__init__.py | Joaomlg/multilayer-perceptron-mnist | 0454c4970c3a06a37ac7c20787a1bdf1cda7da0f | [
"MIT"
]
| null | null | null | data/__init__.py | Joaomlg/multilayer-perceptron-mnist | 0454c4970c3a06a37ac7c20787a1bdf1cda7da0f | [
"MIT"
]
| 4 | 2021-05-18T07:48:52.000Z | 2021-07-10T10:11:41.000Z | import numpy as np
import gzip
import pickle
import os
import urllib.request
class MNIST:
host = 'http://yann.lecun.com/exdb/mnist/'
filenames = {
'train': ('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz'),
'test': ('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'),
}
dataset_filename = 'mnist.pkl.gz'
train_samples = 50000
validation_samples = 10000
test_samples = 10000
def __init__(self):
self.current_dir = os.path.dirname(__file__)
if not self.is_dataset_available():
      print('Dataset not available! It will be downloaded and decoded, and this can take a while, please wait!')
datasets = self.get_base_datasets_filenames()
for dataset in datasets:
if not self.is_base_dataset_downloaded(dataset):
print(f'Downloading {dataset}...')
self.download_dataset(dataset)
print('Decoding files and saving it...')
self.decode_and_save()
print('Deleting base files (downloaded)...')
for dataset in datasets:
self.delete_dataset(dataset)
print('Done.')
def is_dataset_available(self):
return os.path.exists(os.path.join(self.current_dir, self.dataset_filename))
def get_base_datasets_filenames(self):
return self.filenames['train'] + self.filenames['test']
def is_base_dataset_downloaded(self, filename):
return os.path.exists(os.path.join(self.current_dir, filename))
def download_dataset(self, filename):
url = self.host + filename
dest = os.path.join(self.current_dir, filename)
urllib.request.urlretrieve(url, dest)
def delete_dataset(self, filename):
os.remove(os.path.join(self.current_dir, filename))
def decode_and_save(self):
data = {}
for key, (images_filename, labels_filename) in self.filenames.items():
with gzip.open(os.path.join(self.current_dir, images_filename), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16).reshape(-1, 28*28)
with gzip.open(os.path.join(self.current_dir, labels_filename), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
data[key] = (images, labels)
training = tuple(x[:self.train_samples] for x in data['train'])
validation = tuple(x[self.train_samples:] for x in data['train'])
test = tuple(data['test'])
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'wb') as file:
pickle.dump((training, validation, test), file)
def load(self):
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'rb') as file:
training, validation, test = pickle.load(file)
return training, validation, test
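# Hedged usage sketch (added for illustration): constructing MNIST() downloads and decodes
# the raw files only if mnist.pkl.gz is not already present next to this module; load()
# then returns (training, validation, test) tuples of (images, labels) numpy arrays.
if __name__ == '__main__':
  dataset = MNIST()
  (train_x, train_y), (val_x, val_y), (test_x, test_y) = dataset.load()
  print(train_x.shape, val_x.shape, test_x.shape)  # (50000, 784) (10000, 784) (10000, 784)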
| 32.829268 | 110 | 0.686478 | 2,613 | 0.970654 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.156761 |
b40aad26fdc784cc5dfaf249f1c167e4160e4887 | 2,279 | py | Python | Exemple.py | LVWolff/Python_Lesson_2 | ece186f988c94a1aaa1656a1e6e1093c3d5b6251 | [
"MIT"
]
| null | null | null | Exemple.py | LVWolff/Python_Lesson_2 | ece186f988c94a1aaa1656a1e6e1093c3d5b6251 | [
"MIT"
]
| null | null | null | Exemple.py | LVWolff/Python_Lesson_2 | ece186f988c94a1aaa1656a1e6e1093c3d5b6251 | [
"MIT"
]
| null | null | null | # Tasks on loops and the conditional statement ------
#----------------------------------------
'''
Task 1
Print five rows of zeros in a loop; each row must be numbered.
'''
for i in range(1, 6):
print(i, '0000000000000000000000000000000000000000000')
'''
Task 2
The user enters 10 numbers in a loop. Count how many fives the user entered.
'''
count = 0
for i in range(10):
    user_data = int(input('Enter a number: '))
if user_data == 5:
count += 1
print(count)
'''
Task 3
Find the sum of the series of numbers from 1 to 100. Print the result.
'''
sum = 0
for i in range(1, 101):
sum += i
print(sum)
'''
Task 4
Find the product of the series of numbers from 1 to 10. Print the result.
'''
proiz = 1
for i in range(2, 11):
proiz *= i
print(proiz)
'''
Task 5
Print each digit of a number on its own line.
'''
integer_number = 123456
start_del = len(str(integer_number)) - 1
delitel = 10 ** start_del
#print(integer_number % delitel, integer_number // delitel)
while integer_number > 0:
print(int(integer_number // delitel))
integer_number = integer_number % delitel
delitel /= 10
'''
Task 6
Find the sum of the digits of a number.
'''
integer_number = 123456
sum = 0
while integer_number > 0:
sum += integer_number % 10
integer_number = integer_number // 10
print(sum)
'''
Task 7
Find the product of the digits of a number.
'''
integer_number = 123456
proiz = 1
while integer_number > 0:
proiz *= integer_number % 10
integer_number = integer_number // 10
print(proiz)
'''
Task 8
Answer the question: does the number contain the digit 5?
'''
integer_number = 125254
while integer_number > 0:
if integer_number % 10 == 5:
print('Yes')
break
integer_number = integer_number // 10
else:
print('No')
'''
Task 9
Find the largest digit in a number.
'''
integer_number = 125278954
max_num = integer_number % 10
while integer_number > 0:
max_num = max(max_num, integer_number % 10)
integer_number = integer_number // 10
print(max_num)
'''
Task 10
Count how many times the digit 5 occurs in a number.
'''
integer_number = 125278954
count_num = 0
while integer_number > 0:
if integer_number % 10 == 5:
count_num += 1
integer_number = integer_number // 10
print(count_num)
| 18.087302 | 92 | 0.67749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,448 | 0.515119 |
b40bc88be7d9975ca6ad22574a73918dc37e3371 | 11,368 | py | Python | push_exp/main_CrouchSimulationForCOT.py | snumrl/DeepPushRecovery | dceb7f3114d4314cf3be875f43723255819e12a3 | [
"Apache-2.0"
]
| null | null | null | push_exp/main_CrouchSimulationForCOT.py | snumrl/DeepPushRecovery | dceb7f3114d4314cf3be875f43723255819e12a3 | [
"Apache-2.0"
]
| null | null | null | push_exp/main_CrouchSimulationForCOT.py | snumrl/DeepPushRecovery | dceb7f3114d4314cf3be875f43723255819e12a3 | [
"Apache-2.0"
]
| 1 | 2021-07-26T15:08:58.000Z | 2021-07-26T15:08:58.000Z | import os
import numpy as np
import time
import multiprocessing as mp
import csv
import socket
import datetime
import math
import glob
from pypushexp import PushSim
# # input - [recorded item]
# [weight] : 48
# [height] : 160
# [crouch_angle] (deg)
# [step_length_ratio]
# [halfcycle_duration_ratio]
# [push_step] : 8
# [push_duration] (sec) : .2
# [push_force] (N)
# [push_start_timing] (half gait cycle percent)
#
# # output
# [pushed_length] (m) : sim.out_pushed_length
# [pushed_steps] : sim.out_pushed_steps
# [push_strength] : abs(push_force * push_duration / weight)
# [step_length] (m) : sim.getPushedLength()
# [walking_speed] (m/s) : sim.getWalkingSpeed()
# [halfcycle_duration] (s) : sim.getStepLength() /sim.getWalkingSpeed()
#
# # output for hospital
# [distance] : pushed_length * 1000.
# [speed] : walking_speed * 1000.
# [force] : push_strength * 1000.
# [stride] : step_length * 1000.
# [start_timing_time_ic] = sim.start_timing_time_ic
# [mid_timing_time_ic] = sim.mid_timing_time_ic
# [start_timing_foot_ic] = sim.getStartTimingFootIC()
# [mid_timing_foot_ic] = sim.getMidTimingFootIC()
# [start_timing_time_fl] = sim.getStartTimingTimeFL()
# [mid_timing_time_fl] = sim.getMidTimingTimeFL()
# [start_timing_foot_fl] = sim.getStartTimingFootFL()
# [mid_timing_foot_fl] = sim.getMidTimingFootFL()
# # not used
# subject no
# sex
# left leg length
# right leg length
# stride
# speed
# experiment
# file name
# trial no
# push timing : 'left stance'
# push direction : 'from left'
# normalized push length
# push length until first step
# push end timing (time)
# push end timing (foot pos)
# return during first step
# push duration
# push start time
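# Hedged illustration (added, not part of the original script): worker_simulation expects a
# parameter tuple in the order it unpacks below; the numbers here are placeholders and q is
# the multiprocessing queue that collects the (ith, crouch_angle, walking_speed, cot) results.
#
# example_param = (8, 0.2,            # push_step, push_duration
#                  20, 1.0, 1.0,      # crouch_angle, step_length_ratio, walk_speed_ratio
#                  -150.0, 34, 20,    # push_force, push_start_timing, crouch_label
#                  72, 170, 1, q)     # weight, height, ith, result queue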
def gettimestringisoformat():
return datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
def worker_simulation(sim, param):
try:
push_step, push_duration,\
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,\
weight, height, ith, q = param
# print(int(crouch_angle), step_length_ratio, walk_speed_ratio, push_force, push_start_timing)
sim.setParamedStepParams(int(crouch_angle), step_length_ratio, walk_speed_ratio)
sim.setPushParams(8, 0.2, 0., 0.)
print(step_length_ratio, walk_speed_ratio)
stopcode = sim.simulate()
# stopcode = 0
if stopcode in [0, 3, 4]:
cot = sim.getCostOfTransport()
walking_speed = sim.getWalkingSpeed()
q.put((ith, crouch_angle, walking_speed, cot))
except IndexError:
pass
def write_start(csvfilepath):
csvfile = open(csvfilepath, 'w')
csvfile.write('type,ith,crouch_angle,speed,cot\n')
return csvfile
def write_body(q, csvfile):
while True:
try:
ith, crouch_angle, walking_speed, cot = q.get(False)
csvfile.write('torque,%d,%s,%s,%s\n' % (ith, crouch_angle, walking_speed, cot))
csvfile.flush()
except:
break
def write_end(csvfile):
csvfile.close()
def simulate(sim, launch_order, num, option_str=''):
#=======================================================================
# settings
#=======================================================================
TEST = True if launch_order is None else False
# TEST = True
# TEST = False
weight = 72
height = 170
push_step = 8
push_duration = .2
test_params = [] # element: (crouch_angle, step_length_ratio, halfcycle_duration_ratio, push_force, push_start_timing)
# ===========================================================================
#
# ===========================================================================
if TEST:
# test
additional_str = ''
num = 2
# num = 5000
mean_crouch = [0, 20, 30, 60]
else:
# real
all_mean_crouch = [0, 20, 30, 60]
mean_crouch = [all_mean_crouch[launch_order % len(all_mean_crouch)]]
additional_str = '_%ddeg__push' % mean_crouch[0]
# if launch_order==0:
# param_opt_result = '130810_113234_0_60_push'
# additional_str = '_0_60_push'
# elif launch_order==2:
# param_opt_result = '130810_161152_0_30_60_push'
# additional_str = '_0_30_60_push'
# =======================================================================
# set logger
# =======================================================================
outDir = os.path.dirname(os.path.abspath(__file__)) + '/results/'
if not os.path.exists(outDir):
os.makedirs(outDir)
csvfilepath = outDir + 'COT_' +option_str + '_' + gettimestringisoformat() + '_' + str(num) + 'trials_' + socket.gethostname() + '.csv'
print('start logging at', gettimestringisoformat())
print()
print('<simulation setting>')
# =======================================================================
# test2 : multivariate normal distribution
# =======================================================================
stride_means = [1.1262070300, 0.9529737358, 0.9158506655, 0.8755451448]
speed_means = [0.9943359644, 0.8080297151, 0.7880050552, 0.7435198328]
stride_vars = [0.03234099289, 0.02508595114, 0.02772452640, 0.02817863267]
stride_speed_covars = [0.03779884365, 0.02225320798, 0.02906793442, 0.03000639027]
speed_vars = [0.06929309644, 0.04421889347, 0.04899931048, 0.05194827755]
# crouch angle
# mean_crouch = [0,20,30,60]
std_crouch = 1
# step length
motion_stride_bvh_after_default_param = 1.1886
experi_stride_mean = stride_means[launch_order]
experi_stride_std = math.sqrt(stride_vars[launch_order])
mean_length_ratio = experi_stride_mean / motion_stride_bvh_after_default_param
std_length_ratio = experi_stride_std / motion_stride_bvh_after_default_param
# walk speed
speed_bvh_after_default_param = 0.9134
experi_speed_mean = speed_means[launch_order]
experi_speed_std = math.sqrt(speed_vars[launch_order])
mean_speed_ratio = experi_speed_mean / speed_bvh_after_default_param
std_speed_ratio = experi_speed_std / speed_bvh_after_default_param
# push strength
mean_strength = .535
std_strength = .096
mean_force = -(mean_strength*weight/push_duration)
std_force = (std_strength*weight/push_duration)
# push timing
mean_timing = 34
std_timing = 21
if TEST:
np.set_printoptions(precision=4, linewidth=200)
# for i in range(len(mean_crouch)):
# mean = [mean_crouch[i], mean_length_ratio, mean_duration_ratio, mean_force, mean_timing, mean_crouch[i]]
# cov = np.diag( [std_crouch**2, std_length_ratio**2, std_duration_ratio**2, std_force**2, std_timing**2, 0])
for i in range(len(mean_crouch)):
mean = [mean_crouch[i], mean_length_ratio, mean_speed_ratio, mean_force, mean_timing, mean_crouch[i]]
cov = np.diag([0 , std_length_ratio**2, std_speed_ratio**2, std_force**2, std_timing**2, 0])
cov[1, 2] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
cov[2, 1] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
if len(test_params) == 0:
test_params = np.random.multivariate_normal(mean, cov, num)
else:
test_params = np.vstack((test_params, np.random.multivariate_normal(mean, cov, num)))
# no negative crouch angle
for i in range(len(test_params)):
test_params[i][0] = abs(test_params[i][0])
test_params[i][2] = abs(test_params[i][2])
test_params[i][3] = -abs(test_params[i][3])
# print(test_params)
print()
print('multivariate normal distribution')
print()
print('mean_crouch', mean_crouch)
print('std_crouch', std_crouch)
print()
print('motion_step_stride', motion_stride_bvh_after_default_param)
print('experi_step_length_mean', experi_stride_mean)
print('experi_step_length_std', experi_stride_std)
print('mean_length_ratio', mean_length_ratio)
print('std_length_ratio', std_length_ratio)
print()
print('motion_speed', speed_bvh_after_default_param)
print('experi_speed_mean', experi_speed_mean)
print('experi_speed_std', experi_speed_std)
print('mean_speed_ratio', mean_speed_ratio)
print('std_speed_ratio', std_speed_ratio)
print()
print('num', num)
print()
print('total # of simulations', len(test_params))
print()
# =======================================================================
# simulation
# =======================================================================
pt = time.time()
print('<start simulation>')
print('hostname %s ' % socket.gethostname())
print()
q = mp.Manager().Queue()
groupsize = 100
paramgroups = [[] for i in range( len(test_params)//groupsize + 1 )]
ith = 1
for i in range(len(test_params)):
crouch_angle = test_params[i][0]
step_length_ratio = test_params[i][1]
walk_speed_ratio = test_params[i][2]
push_force = test_params[i][3]
push_start_timing = test_params[i][4]
crouch_label = test_params[i][5]
paramgroups[i//groupsize].append((push_step, push_duration,
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,
weight, height, ith, q))
ith += 1
csvfile = write_start(csvfilepath)
for i in range(len(paramgroups)):
for j in range(len(paramgroups[i])):
print(j)
worker_simulation(sim, paramgroups[i][j])
write_body(q, csvfile)
write_end(csvfile)
print()
_s = time.time() - pt
_h = _s // 3600
_m = _s // 60
_s -= 60 * _m
_m -= 60 * _h
print('elapsed time = %d h:%d m:%d s' % (int(_h), int(_m), int(_s)))
print()
print('end logging at', gettimestringisoformat())
if __name__ == '__main__':
import sys
import re
option = sys.argv[1]
trial_num = int(sys.argv[2])
_metadata_dir = os.path.dirname(os.path.abspath(__file__)) + '/../data/metadata/'
_nn_finding_dir = os.path.dirname(os.path.abspath(__file__)) + '/../nn/*/'
nn_dir = None
if _nn_finding_dir is not None:
nn_dir = glob.glob(_nn_finding_dir + option)[0]
meta_file = _metadata_dir + option + '.txt'
sim = None
if 'muscle' in option:
sim = PushSim(meta_file, nn_dir+'/max.pt', nn_dir+'/max_muscle.pt')
else:
sim = PushSim(meta_file, nn_dir+'/max.pt')
if "all" in option:
simulate(sim, 0, trial_num, option)
simulate(sim, 1, trial_num, option)
simulate(sim, 2, trial_num, option)
simulate(sim, 3, trial_num, option)
else:
crouch = re.findall(r'crouch\d+', option)[0][6:]
simulate(sim, ['0', '20', '30', '60'].index(crouch), trial_num, option)
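# Hedged usage note (added for illustration): the script expects an option name and a trial
# count on the command line, e.g.
#   python main_CrouchSimulationForCOT.py walk_all_muscle 100
# where the option must match a metadata file under ../data/metadata/ and a network
# directory under ../nn/; "walk_all_muscle" is a placeholder name.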
| 34.344411 | 139 | 0.598522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,980 | 0.350106 |
b40c71ed0a4ab0b122f61556dae6f792302c5678 | 776 | py | Python | lepiota/lepiota/urls.py | sgelias/lepiota | 4b30aa25ac5308229f6d41f1720e1af02557826e | [
"MIT"
]
| null | null | null | lepiota/lepiota/urls.py | sgelias/lepiota | 4b30aa25ac5308229f6d41f1720e1af02557826e | [
"MIT"
]
| null | null | null | lepiota/lepiota/urls.py | sgelias/lepiota | 4b30aa25ac5308229f6d41f1720e1af02557826e | [
"MIT"
]
| null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import include
from django.views.generic import TemplateView, RedirectView
urlpatterns = [
# Administration
path('admin/', admin.site.urls),
# Accounts
path('account/', include('account.urls', namespace='account')),
# Oauth2
path('api/v1/o/', include('oauth.urls', namespace='oauth2_provider')),
# General purpose
path('welcome/', TemplateView.as_view(template_name="welcome.html")),
path('', RedirectView.as_view(url="/welcome/")),
re_path(r'^$', RedirectView.as_view(url="/welcome/")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 29.846154 | 74 | 0.716495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.238402 |
b40c87bef3a1437769ac688f07452b9daed5f901 | 189 | py | Python | src/base/admin.py | dhavall13/Decode | 8b9cbec72ade727d62edb90c3a38152e0285fe90 | [
"MIT"
]
| null | null | null | src/base/admin.py | dhavall13/Decode | 8b9cbec72ade727d62edb90c3a38152e0285fe90 | [
"MIT"
]
| null | null | null | src/base/admin.py | dhavall13/Decode | 8b9cbec72ade727d62edb90c3a38152e0285fe90 | [
"MIT"
]
| null | null | null | from django.contrib import admin
from .models import Room, Topic, Message, User
admin.site.register(Room)
admin.site.register(Topic)
admin.site.register(Message)
admin.site.register(User)
| 23.625 | 46 | 0.804233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b40e2538e7eca239f3b41df3368718122f54c302 | 10,744 | py | Python | gorilla/config/_config.py | sunjiahao1999/gorilla-core | bf43e3a49c7f79834ae969db38edd50f17ef5288 | [
"MIT"
]
| 4 | 2021-07-28T04:50:26.000Z | 2021-09-23T12:59:01.000Z | gorilla/config/_config.py | sunjiahao1999/gorilla-core | bf43e3a49c7f79834ae969db38edd50f17ef5288 | [
"MIT"
]
| null | null | null | gorilla/config/_config.py | sunjiahao1999/gorilla-core | bf43e3a49c7f79834ae969db38edd50f17ef5288 | [
"MIT"
]
| 2 | 2021-08-05T04:01:12.000Z | 2021-12-25T02:17:03.000Z | # Copyright (c) Open-MMLab. All rights reserved.
import os
import json
import tempfile
import warnings
from typing import Optional
from argparse import Namespace
from addict import Dict
from ..utils import check_file
BASE_KEY = "_base_"
RESERVED_KEYS = ["filename", "text"]
class ConfigDict(Dict):
r"""ConfigDict based on Dict, which use to convert the config
file into config dict
"""
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError(
f"`{self.__class__.__name__}` object has no attribute `{name}`"
)
except Exception as e:
ex = e
else:
return value
raise ex
class Config(object):
r"""A facility for config and config files.
It supports common file formats as configs: python/json/yaml. The interface
is the same as a dict object and also allows access config values as
attributes.
Example:
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
>>> cfg.a
1
>>> cfg.b
{"b1": [0, 1]}
>>> cfg.b.b1
[0, 1]
>>> cfg = Config.fromfile("./configs/test.py")
>>> cfg.filename
"/home/gorilla_lab/code/gorilla/configs/test.py"
>>> cfg.item4
"test"
>>> cfg
"Config [path: /home/gorilla_lab/code/gorilla/configs/test.py]: "
"{"item1": [1, 2], "item2": {"a": 0}, "item3": True, "item4": "test"}"
"""
def __init__(self,
cfg_dict: Optional[Dict] = None,
cfg_text: Optional[str] = None,
filename: Optional[str] = None):
if cfg_dict is None:
cfg_dict = dict()
elif not isinstance(cfg_dict, dict):
raise TypeError(f"cfg_dict must be a dict, "
f"but got {type(cfg_dict)}")
for key in cfg_dict:
if key in RESERVED_KEYS:
raise KeyError(f"{key} is reserved for config file")
super(Config, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
super(Config, self).__setattr__("_filename", filename)
if cfg_text:
text = cfg_text
elif filename:
with open(filename, "r") as f:
text = f.read()
else:
text = ""
super(Config, self).__setattr__("_text", text)
@staticmethod
def _file2dict(filename: str):
filename = os.path.abspath(os.path.expanduser(filename))
check_file(filename)
from gorilla.fileio import load
cfg_dict = ConfigDict(load(filename))
with open(filename, "r") as f:
cfg_text = f.read()
# here cfg_dict is still the same as content in --config file,
        # and the code block below reads the sub-config files listed under _base_ and merges them into one.
if BASE_KEY in cfg_dict:
cfg_dir = os.path.dirname(filename)
base_filename = cfg_dict.pop(BASE_KEY)
base_filename = base_filename if isinstance(
base_filename, list) else [base_filename]
cfg_dict_list = list()
cfg_text_list = list()
for f in base_filename:
_cfg_dict, _cfg_text = Config._file2dict(os.path.join(cfg_dir, f))
cfg_dict_list.append(_cfg_dict)
cfg_text_list.append(_cfg_text)
base_cfg_dict = dict()
for c in cfg_dict_list:
if len(base_cfg_dict.keys() & c.keys()) > 0:
# e.g. sub-config file about dataset should not overlap with
# the one about model
raise KeyError("Duplicate key is not allowed among bases")
base_cfg_dict.update(c)
cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
# merge cfg_text
cfg_text_list.append(cfg_text)
cfg_text = "\n".join(cfg_text_list)
return cfg_dict, cfg_text
@staticmethod
def _merge_a_into_b(a, b):
r"""merge dict ``a`` into dict ``b`` (non-inplace).
Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
in-place modifications.
Args:
a (dict): The source dict to be merged into ``b``.
b (dict): The origin dict to be fetch keys from ``a``.
Returns:
dict: The modified dict of ``b`` using ``a``.
Examples:
# Normally merge a into b.
>>> Config._merge_a_into_b(
... dict(obj=dict(a=2)), dict(obj=dict(a=1)))
{"obj": {"a": 2}}
"""
b = b.copy()
for k, v in a.items():
if isinstance(v, dict) and k in b:
allowed_types = dict
if not isinstance(b[k], allowed_types):
raise TypeError(
f"{k}={v} in child config cannot inherit from base "
f"because {k} is a dict in the child config but is of "
f"type {type(b[k])} in base config.")
b[k] = Config._merge_a_into_b(v, b[k])
else:
b[k] = v
return b
@staticmethod
def fromfile(filename: str):
        r"""Build a Config from a file: cfg_text is the text content read from the
        file (plus any _base_ files), and cfg_dict is a dict resolved from that content.
"""
cfg_dict, cfg_text = Config._file2dict(filename)
return Config(cfg_dict, cfg_text=cfg_text, filename=filename)
@staticmethod
def fromstring(cfg_str, file_format):
"""Generate config from config str.
Args:
cfg_str (str): Config str.
file_format (str): Config file format corresponding to the
config str. Only py/yml/yaml/json type are supported now!
Returns:
obj:`Config`: Config obj.
"""
if file_format not in [".py", ".json", ".yaml", ".yml"]:
raise IOError("Only py/yml/yaml/json type are supported now!")
if file_format != ".py" and "dict(" in cfg_str:
# check if users specify a wrong suffix for python
warnings.warn(
"Please check 'file_format', the file format may be .py")
with tempfile.NamedTemporaryFile("w", suffix=file_format) as temp_file:
temp_file.write(cfg_str)
temp_file.flush()
cfg = Config.fromfile(temp_file.name)
return cfg
@property
def filename(self) -> str:
return self._filename
@property
def text(self) -> str:
return self._text
def __repr__(self) -> str:
content = f"Config (path: {self.filename})\n"
content += json.dumps(self._cfg_dict, indent=4, ensure_ascii=False)
return content
def __len__(self) -> int:
return len(self._cfg_dict)
def __getattr__(self, name: str):
return getattr(self._cfg_dict, name)
def __getitem__(self, name: str):
return self._cfg_dict.__getitem__(name)
def __setattr__(self, name: str, value: Dict):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setattr__(name, value)
def __setitem__(self, name: str, value: Dict):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setitem__(name, value)
def __iter__(self):
return iter(self._cfg_dict)
def dump(self, file: Optional[str] = None, **kwargs):
cfg_dict = self._cfg_dict.to_dict()
from gorilla.fileio import dump
if file is None:
# output the content
file_format = self.filename.split(".")[-1]
if file_format == "py":
return self.text
else:
return dump(cfg_dict, file_format=file_format, **kwargs)
else:
if file.endswith("py"):
with open(file, "w") as f:
f.write(self.text)
else:
dump(cfg_dict, file, **kwargs)
def merge_from_dict(self, options: Dict):
r"""Merge list into cfg_dict.
Merge the dict parsed by MultipleKVAction into this cfg.
Examples:
>>> options = {"model.backbone.depth": 50,
... "model.backbone.with_cp":True}
>>> cfg = Config(dict(model=dict(backbone=dict(type="ResNet"))))
>>> cfg.merge_from_dict(options)
>>> cfg_dict = super(Config, self).__getattribute__("_cfg_dict")
>>> assert cfg_dict == dict(
... model=dict(backbone=dict(depth=50, with_cp=True)))
# Merge list element
>>> cfg = Config(dict(pipeline=[
... dict(type="LoadImage"), dict(type="LoadAnnotations")]))
>>> options = dict(pipeline={"0": dict(type="SelfLoadImage")})
Args:
options (dict): dict of configs to merge from.
"""
option_cfg_dict = {}
for full_key, v in options.items():
if v is None: # handle the case when a parameter simultaneously appears in argparse and config file
continue
d = option_cfg_dict
key_list = full_key.split(".")
for subkey in key_list[:-1]:
d.setdefault(subkey, ConfigDict())
d = d[subkey]
subkey = key_list[-1]
d[subkey] = v
cfg_dict = self._cfg_dict
cfg_dict = Config._merge_a_into_b(option_cfg_dict, cfg_dict)
# NOTE: strange phenomenon
# self._cfg_dict = cfg_dict
super(Config, self).__setattr__("_cfg_dict", cfg_dict)
def merge_cfg_and_args(cfg: Optional[Config] = None,
args: Optional[Namespace] = None) -> Config:
r"""merge args and cfg into a Config by calling 'merge_from_dict' func
Args:
cfg (Config, optional): Config from cfg file.
args (Namespace, optional): Argument parameters input.
Returns:
Config: Merged Config
"""
assert cfg is not None or args is not None, "'cfg' or 'args' can not be None simultaneously"
if cfg is None:
cfg = Config()
else:
assert isinstance(
cfg, Config
), f"'cfg' must be None or gorilla.Config, but got {type(cfg)}"
if args is None:
args = Namespace()
else:
assert isinstance(
args, Namespace
), f"'args' must be None or argsparse.Namespace, but got {type(args)}"
# convert namespace into dict
args_dict = vars(args)
cfg.merge_from_dict(args_dict)
return cfg
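# Hedged usage sketch (added for illustration, not part of the original module): a minimal
# round trip through Config and merge_cfg_and_args; the option names here are made up.
def _example_merge_cfg_and_args():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("--lr", type=float, default=1e-3)
    args = parser.parse_args([])            # empty argv, so the default value is used
    cfg = Config(dict(lr=1e-2, epochs=10))
    merged = merge_cfg_and_args(cfg, args)  # non-None values from args overwrite the config
    return merged["lr"], merged["epochs"]   # -> (0.001, 10)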
| 34.770227 | 112 | 0.560964 | 9,504 | 0.884587 | 0 | 0 | 4,165 | 0.387658 | 0 | 0 | 4,267 | 0.397152 |
b40e4f7e84bc53160bafd291d5c8ea6b4b1f43bd | 2,643 | py | Python | Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py | karim-awad/kaspa | 701d935dd215bfd9a4810a4430973b33fecec257 | [
"MIT"
]
| null | null | null | Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py | karim-awad/kaspa | 701d935dd215bfd9a4810a4430973b33fecec257 | [
"MIT"
]
| null | null | null | Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py | karim-awad/kaspa | 701d935dd215bfd9a4810a4430973b33fecec257 | [
"MIT"
]
| null | null | null | from Kaspa.modules.abstract_modules.abstractSubmodule import AbstractSubmodule
from Kaspa.modules.exceptions.impossibleActionError import ImpossibleActionError
from Kaspa.config import Config
class SpotifyModuleEn(AbstractSubmodule):
module_name = "Spotify"
language = "en"
key_regexes = dict()
def __init__(self):
self.key_regexes = {'(?i).*?(?=continue)+.+?(?=playback)+.': self.action_continue_playback,
'(?i).*?(?=pause)+.': self.action_play,
'(?i).*?(?=play)+.': self.action_play,
'(?i).*?(?=next)+.': self.action_next,
'(?i).*?(?=stop)+.': self.action_pause,
'(?i).*?(?=what)+.+?(?=song)+.': self.action_song_info}
def action_continue_playback(self, query):
communicator = query.get_communicator()
self.main_module.continue_playback()
communicator.say("I am now continuing your music playback.")
return
def action_pause(self, query):
communicator = query.get_communicator()
self.main_module.pause()
communicator.say("Music paused.")
return
def action_play(self, query):
communicator = query.get_communicator()
text = query.get_text()
try:
self.action_continue_playback(query)
return
except ImpossibleActionError:
pass
if self.main_module.current_song() is None:
self.main_module.play_saved()
communicator.say("Okay, playing your last added songs.")
return
# fetch all playlist macros from config file and search for matches in the query
playlists = Config.get_instance().get_section_content('playlists')
for playlist in playlists:
if playlist[0].lower() in text.lower():
self.main_module.play_playlist(playlist[1])
                communicator.say("Okay, I'll now play the playlist " + playlist[0] + ".")
return
self.main_module.play()
communicator.say("Okay")
return
def action_next(self, query):
communicator = query.get_communicator()
self.main_module.next()
communicator.say("Okay")
return
def action_song_info(self, query):
communicator = query.get_communicator()
if self.main_module.current_song():
title, artist = self.main_module.current_song()
communicator.say("The song is " + title + " by " + artist + ".")
else:
communicator.say("There is no music loaded right now.")
| 36.205479 | 99 | 0.596292 | 2,448 | 0.92622 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.172153 |
b40e9592fe62c2017e79612d2b201dbc82a4fb4e | 2,768 | py | Python | screenshot-server/app/main.py | martindines/ScreenshotServer | 21d1529157f4625cd26196000c4a30342ab4d713 | [
"MIT"
]
| 1 | 2019-12-31T18:43:08.000Z | 2019-12-31T18:43:08.000Z | screenshot-server/app/main.py | martindines/ScreenshotServer | 21d1529157f4625cd26196000c4a30342ab4d713 | [
"MIT"
]
| 1 | 2019-12-31T19:35:24.000Z | 2019-12-31T19:35:24.000Z | screenshot-server/app/main.py | martindines/ScreenshotServer | 21d1529157f4625cd26196000c4a30342ab4d713 | [
"MIT"
]
| null | null | null | import os
import sys
import pathlib
from utilities import get_random_hash
from flask import Flask, flash, request, redirect, url_for, send_from_directory, jsonify, Response
UPLOAD_FOLDER = os.environ.get('UPLOAD_FOLDER') if os.environ.get('UPLOAD_FOLDER') else '/tmp'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
SECRET = os.environ.get('SECRET')
app = Flask(__name__)
app.config['SERVER_NAME'] = os.environ.get('SERVER_NAME')
def allowed_file(filename):
return '.' in filename and \
pathlib.Path(filename).suffix[1:] in ALLOWED_EXTENSIONS
def is_secret_valid(guess):
try:
if guess == SECRET:
return True
return False
except KeyError:
return False
def verify_auth_headers():
if 'secret' in request.headers:
guess = request.headers['secret']
return is_secret_valid(guess)
return False
def upload_file_and_return_external_path(file):
extension = pathlib.Path(file.filename).suffix
filename = get_random_hash() + extension
filepath = os.path.join(UPLOAD_FOLDER, filename)
if os.path.exists(filepath):
        # retry with a fresh random name until an unused one is found
        return upload_file_and_return_external_path(file)
else:
file.save(filepath)
return url_for('upload', filename=filename, _external=True)
@app.route('/')
def index():
return '''
<!doctype html>
'''
@app.route('/<filename>', methods=['GET'])
def upload(filename):
    if allowed_file(filename):
        return send_from_directory(UPLOAD_FOLDER, filename)
    # disallowed extensions would otherwise fall through and return None
    return Response(status=404)
@app.route('/api/auth', methods=['GET'])
def api_auth():
if verify_auth_headers():
return jsonify(
success=True
)
return jsonify(
success=False,
message='Invalid secret'
)
@app.route('/api/upload', methods=['POST'])
def api_upload():
if verify_auth_headers():
# check if the post request has the file part
if 'file' not in request.files:
return jsonify(
success=False,
message='No file present'
)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return jsonify(
success=False,
message='Filename missing'
)
if file and allowed_file(file.filename):
path = upload_file_and_return_external_path(file)
return jsonify(
success=True,
path=path
)
else:
return jsonify(
success=False,
message='File type not allowed'
)
return jsonify(
success=False,
message='Invalid secret'
)
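# Hedged usage sketch (added for illustration, not part of the original app): how a client
# might call the upload endpoint; the URL, secret and filename are placeholders and the
# requests library is assumed to be available on the client side.
def _example_client_upload(base_url="http://localhost:5000", secret="change-me"):
    import requests
    with open("screenshot.png", "rb") as fh:
        response = requests.post(base_url + "/api/upload",
                                 headers={"secret": secret},
                                 files={"file": fh})
    return response.json()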
| 24.936937 | 98 | 0.610549 | 0 | 0 | 0 | 0 | 1,468 | 0.530347 | 0 | 0 | 447 | 0.161488 |
b40ee079a577a77555888197b34380d7e63acfd3 | 517 | py | Python | src/waldur_mastermind/notifications/migrations/0002_json_field.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
]
| 2 | 2017-01-20T15:26:25.000Z | 2017-08-03T04:38:08.000Z | src/waldur_mastermind/notifications/migrations/0002_json_field.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
]
| null | null | null | src/waldur_mastermind/notifications/migrations/0002_json_field.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2 on 2022-01-31 14:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='emails',
field=models.JSONField(),
),
migrations.AlterField(
model_name='notification',
name='query',
field=models.JSONField(),
),
]
| 21.541667 | 45 | 0.560928 | 426 | 0.823985 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.226306 |
b41042e5988e8d27b58649ccaf22e396c4b031cb | 2,800 | py | Python | gitgoggles/utils.py | nowells/git-goggles | 022dc0cd6dfe8f1641ccb33e85ab05309dba7dbf | [
"MIT"
]
| 13 | 2015-03-10T08:48:51.000Z | 2019-04-16T09:06:55.000Z | gitgoggles/utils.py | nowells/git-goggles | 022dc0cd6dfe8f1641ccb33e85ab05309dba7dbf | [
"MIT"
]
| null | null | null | gitgoggles/utils.py | nowells/git-goggles | 022dc0cd6dfe8f1641ccb33e85ab05309dba7dbf | [
"MIT"
]
| 3 | 2016-04-29T05:38:56.000Z | 2020-07-06T13:04:05.000Z | import copy
import subprocess
import sys
import unicodedata
def disable_colored_func(text, *args, **kwargs):
return text
try:
from termcolor import colored as colored_func
except ImportError:
print 'You should run "pip install termcolor" to fully utilize these utilities.'
colored_func = disable_colored_func
def supports_color():
"""
Returns True if the running system's terminal supports color, and False
otherwise.
"""
unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if unsupported_platform or not is_a_tty:
return False
return True
if not supports_color():
colored_func = disable_colored_func
class Colored(object):
disabled = False
def __call__(self, *args, **kwargs):
if self.disabled:
return disable_colored_func(*args, **kwargs)
return colored_func(*args, **kwargs)
colored = Colored()
def force_unicode(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
# Normalize the unicode data to have characters that in NFKD format would be represented by 2 characters, instead of 1.
obj = unicodedata.normalize('NFKC', obj)
return obj
def force_str(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, str):
obj = obj.encode(encoding)
return obj
def console(obj):
sys.stdout.write(force_str(obj))
class AccumulatorDict(dict):
def __init__(self, default, *args, **kwargs):
self.__default = default
def __getitem__(self, key):
if key not in self:
self[key] = copy.copy(self.__default)
return super(AccumulatorDict, self).__getitem__(key)
def memoize(func):
def _(self, *args, **kwargs):
if not hasattr(self, '__memoize_cache'):
self.__memoize_cache = AccumulatorDict(AccumulatorDict({}))
key = tuple([ tuple(args), tuple([ tuple([x, y]) for x, y in kwargs.items() ]) ])
if key not in self.__memoize_cache[func]:
self.__memoize_cache[func][key] = func(self, *args, **kwargs)
return self.__memoize_cache[func][key]
return _
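# Hedged usage sketch (added for illustration): memoize caches results per instance, keyed
# by the call arguments, so repeated calls with the same arguments reuse the first computed
# value instead of calling the wrapped method again.
class _MemoizeExample(object):
    @memoize
    def slow_square(self, x):
        return x * x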
def terminal_dimensions():
try:
# This probably does not work on windows, but it should work just about
# everywhere else.
p = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate(None)
stdout = force_unicode(stdout)
stderr = force_unicode(stderr)
rows, columns = [ int(x) for x in stdout.split() ]
except:
rows, columns = 40, 79
return rows, columns
| 32.183908 | 127 | 0.658571 | 495 | 0.176786 | 0 | 0 | 0 | 0 | 0 | 0 | 501 | 0.178929 |
b410813c6c4297c46c6ca2597443a122ba6dda59 | 4,308 | py | Python | test/unit/tools/test_basisconstructors.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
]
| 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | test/unit/tools/test_basisconstructors.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
]
| 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | test/unit/tools/test_basisconstructors.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
]
| 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | import numpy as np
import pygsti.baseobjs.basisconstructors as bc
from ..util import BaseCase


class BasisConstructorsTester(BaseCase):
    def test_GellMann(self):
        id2x2 = np.array([[1, 0], [0, 1]])
        sigmax = np.array([[0, 1], [1, 0]])
        sigmay = np.array([[0, -1.0j], [1.0j, 0]])
        sigmaz = np.array([[1, 0], [0, -1]])

        # Gell-Mann 2x2 matrices should just be the sigma matrices
        GM2_mxs = bc.gm_matrices_unnormalized(2)
        self.assertTrue(len(GM2_mxs) == 4)
        self.assertArraysAlmostEqual(GM2_mxs[0], id2x2)
        self.assertArraysAlmostEqual(GM2_mxs[1], sigmax)
        self.assertArraysAlmostEqual(GM2_mxs[2], sigmay)
        self.assertArraysAlmostEqual(GM2_mxs[3], sigmaz)
        with self.assertRaises(TypeError):
            bc.gm_matrices_unnormalized("FooBar")  # arg must be tuple, list, or int

        # Normalized Gell-Mann 2x2 matrices should just be the sigma matrices / sqrt(2)
        NGM2_mxs = bc.gm_matrices(2)
        self.assertTrue(len(NGM2_mxs) == 4)
        self.assertArraysAlmostEqual(NGM2_mxs[0], id2x2 / np.sqrt(2))
        self.assertArraysAlmostEqual(NGM2_mxs[1], sigmax / np.sqrt(2))
        self.assertArraysAlmostEqual(NGM2_mxs[2], sigmay / np.sqrt(2))
        self.assertArraysAlmostEqual(NGM2_mxs[3], sigmaz / np.sqrt(2))

        # TODO: test 4x4 matrices?

    def test_orthogonality(self):
        # Gell-Mann basis
        dim = 5
        mxs = bc.gm_matrices(dim)
        N = len(mxs); self.assertTrue(N == dim**2)

        gm_trMx = np.zeros((N, N), 'complex')
        for i in range(N):
            for j in range(N):
                gm_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
                # Note: conjugate transpose not needed since mxs are Hermitian
        self.assertArraysAlmostEqual(gm_trMx, np.identity(N, 'complex'))

        # Std basis
        dim = 5
        mxs = bc.std_matrices(dim)
        N = len(mxs); self.assertTrue(N == dim**2)

        std_trMx = np.zeros((N, N), 'complex')
        for i in range(N):
            for j in range(N):
                std_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
        self.assertArraysAlmostEqual(std_trMx, np.identity(N, 'complex'))

        # Pauli-product basis
        dim = 4
        mxs = bc.pp_matrices(dim)
        N = len(mxs); self.assertTrue(N == dim**2)

        with self.assertRaises(TypeError):
            bc.pp_matrices("Foobar")  # dim must be an int
        with self.assertRaises(ValueError):
            bc.pp_matrices(3)  # dim must be a power of 4

        specialCase = bc.pp_matrices(1)  # single 1x1 identity mx
        self.assertEqual(specialCase, [np.identity(1, 'complex')])

        pp_trMx = np.zeros((N, N), 'complex')
        for i in range(N):
            for j in range(N):
                pp_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
                # Note: conjugate transpose not needed since mxs are Hermitian
        self.assertArraysAlmostEqual(pp_trMx, np.identity(N, 'complex'))

    def test_basis_misc(self):
        mx = bc.pp_matrices(1)  # was [1] but this shouldn't be allowed
        self.assertArraysAlmostEqual(np.identity(1, 'complex'), mx)

    def test_pp_maxweight(self):
        pp2Max1 = bc.pp_matrices(2, max_weight=1)  # using max_weight
        pp2 = bc.pp_matrices(2)  # for 2x2, should match max_weight=1
        for mxMax, mx in zip(pp2Max1, pp2):
            self.assertArraysAlmostEqual(mxMax, mx)

        pp4Max1 = bc.pp_matrices(4, max_weight=1)
        pp4 = bc.pp_matrices(4)
        pp4Subset = [pp4[0], pp4[1], pp4[2], pp4[3], pp4[4], pp4[8], pp4[12]]  # pull out II,IX,IY,IZ,XI,YI,ZI
        for mxMax, mxSub in zip(pp4Max1, pp4Subset):
            self.assertArraysAlmostEqual(mxMax, mxSub)

    def test_qt_dim1(self):
        qutrit1 = bc.qt_matrices(1)  # special case when dim == 1
        self.assertArraysAlmostEqual(np.identity(1, 'd'), qutrit1)

    def test_qt_orthonorm(self):
        mxs = bc.qt_matrices(3)
        for i in range(len(mxs)):
            for j in range(len(mxs)):
                dp = np.vdot(mxs[i], mxs[j])
                if i == j:
                    self.assertAlmostEqual(dp, 1.0)
                else:
                    self.assertAlmostEqual(dp, 0.0)
| 40.261682 | 109 | 0.597957 | 4,210 | 0.977252 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.154596 |
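A minimal standalone sketch of the orthonormality check exercised in the file above, assuming pyGSTi is installed and exposes the same pygsti.baseobjs.basisconstructors module; it verifies that the normalized Gell-Mann matrices are orthonormal under the Hilbert-Schmidt inner product Tr(A^dagger B):

import numpy as np
import pygsti.baseobjs.basisconstructors as bc

# Build the normalized Gell-Mann basis for a dim x dim matrix space and form
# its Gram matrix under <A, B> = Tr(A^dagger B); for an orthonormal basis the
# Gram matrix is the identity, which is exactly what the test asserts.
dim = 3
mxs = bc.gm_matrices(dim)
gram = np.array([[np.trace(a.conj().T @ b) for b in mxs] for a in mxs])
assert np.allclose(gram, np.identity(dim**2))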
b4130d04b43c706ebb56a9d6ede2201a268db5d7 | 7,913 | py | Python | tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | ["Apache-2.0"] | 384 | 2017-02-21T18:38:04.000Z | 2022-02-22T07:30:25.000Z | tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | ["Apache-2.0"] | 15 | 2017-03-01T20:18:43.000Z | 2020-05-07T10:33:51.000Z | tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | ["Apache-2.0"] | 81 | 2017-02-21T19:31:19.000Z | 2022-02-22T07:30:24.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six

from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test


class HParamsTest(test.TestCase):

  def _assertDictEquals(self, d1, d2):
    self.assertEqual(len(d1), len(d2))
    for k, v in six.iteritems(d1):
      self.assertTrue(k in d2, k)
      self.assertEquals(v, d2[k], d2[k])

  def testEmpty(self):
    hparams = hparam.HParams()
    self._assertDictEquals({}, hparams.values())
    hparams.parse('')
    self._assertDictEquals({}, hparams.values())
    with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
      hparams.parse('xyz=123')

  def testSomeValues(self):
    hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
    self._assertDictEquals(
        {'aaa': 1, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
    expected_str = '[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'
    self.assertEquals(expected_str, str(hparams.__str__()))
    self.assertEquals(expected_str, str(hparams))
    self.assertEquals(1, hparams.aaa)
    self.assertEquals(2.0, hparams.b)
    self.assertEquals('relu6', hparams.c_c)
    hparams.parse('aaa=12')
    self._assertDictEquals(
        {'aaa': 12, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
    self.assertEquals(12, hparams.aaa)
    self.assertEquals(2.0, hparams.b)
    self.assertEquals('relu6', hparams.c_c)
    hparams.parse('c_c=relu4,b=-2.0e10')
    self._assertDictEquals({'aaa': 12, 'b': -2.0e10, 'c_c': 'relu4'},
                           hparams.values())
    self.assertEquals(12, hparams.aaa)
    self.assertEquals(-2.0e10, hparams.b)
    self.assertEquals('relu4', hparams.c_c)
    hparams.parse('c_c=,b=0,')
    self._assertDictEquals({'aaa': 12, 'b': 0, 'c_c': ''}, hparams.values())
    self.assertEquals(12, hparams.aaa)
    self.assertEquals(0.0, hparams.b)
    self.assertEquals('', hparams.c_c)
    hparams.parse('c_c=2.3",b=+2,')
    self.assertEquals(2.0, hparams.b)
    self.assertEquals('2.3"', hparams.c_c)
    with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
      hparams.parse('x=123')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('aaa=poipoi')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('aaa=1.0')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('b=12x')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('b=relu')
    with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
      hparams.parse('aaa=[123]')
    self.assertEquals(12, hparams.aaa)
    self.assertEquals(2.0, hparams.b)
    self.assertEquals('2.3"', hparams.c_c)
    # Exports to proto.
    hparam_def = hparams.to_proto()
    # Imports from proto.
    hparams2 = hparam.HParams(hparam_def=hparam_def)
    # Verifies that all hparams are restored.
    self.assertEquals(12, hparams2.aaa)
    self.assertEquals(2.0, hparams2.b)
    self.assertEquals('2.3"', hparams2.c_c)

  def testBoolParsing(self):
    for value in 'true', 'false', 'True', 'False', '1', '0':
      for initial in False, True:
        hparams = hparam.HParams(use_gpu=initial)
        hparams.parse('use_gpu=' + value)
        self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])

        # Exports to proto.
        hparam_def = hparams.to_proto()
        # Imports from proto.
        hparams2 = hparam.HParams(hparam_def=hparam_def)
        self.assertEquals(hparams.use_gpu, hparams2.use_gpu)
        # Check that hparams2.use_gpu is a bool rather than an int.
        # The assertEquals() call above won't catch this, since
        # (0 == False) and (1 == True) in Python.
        self.assertEquals(bool, type(hparams2.use_gpu))

  def testBoolParsingFail(self):
    hparams = hparam.HParams(use_gpu=True)
    with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
      hparams.parse('use_gpu=yep')

  def testLists(self):
    hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
    self._assertDictEquals({'aaa': [1], 'b': [2.0, 3.0], 'c_c': ['relu6']},
                           hparams.values())
    self.assertEquals([1], hparams.aaa)
    self.assertEquals([2.0, 3.0], hparams.b)
    self.assertEquals(['relu6'], hparams.c_c)
    hparams.parse('aaa=[12]')
    self.assertEquals([12], hparams.aaa)
    hparams.parse('aaa=[12,34,56]')
    self.assertEquals([12, 34, 56], hparams.aaa)
    hparams.parse('c_c=[relu4,relu12],b=[1.0]')
    self.assertEquals(['relu4', 'relu12'], hparams.c_c)
    self.assertEquals([1.0], hparams.b)
    hparams.parse('c_c=[],aaa=[-34]')
    self.assertEquals([-34], hparams.aaa)
    self.assertEquals([], hparams.c_c)
    hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
    self.assertEquals([3], hparams.aaa)
    self.assertEquals(['_12', '3\'4"'], hparams.c_c)
    with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
      hparams.parse('x=[123]')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('aaa=[poipoi]')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('aaa=[1.0]')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('b=[12x]')
    with self.assertRaisesRegexp(ValueError, 'Could not parse'):
      hparams.parse('b=[relu]')
    with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
      hparams.parse('aaa=123')
    # Exports to proto.
    hparam_def = hparams.to_proto()
    # Imports from proto.
    hparams2 = hparam.HParams(hparam_def=hparam_def)
    # Verifies that all hparams are restored.
    self.assertEquals([3], hparams2.aaa)
    self.assertEquals([1.0], hparams2.b)
    self.assertEquals(['_12', '3\'4"'], hparams2.c_c)

  def testJson(self):
    hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
    self._assertDictEquals(
        {'aaa': 1, 'b': 2.0, 'c_c': 'relu6', 'd': True}, hparams.values())
    self.assertEquals(1, hparams.aaa)
    self.assertEquals(2.0, hparams.b)
    self.assertEquals('relu6', hparams.c_c)
    hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
    self._assertDictEquals(
        {'aaa': 12, 'b': 3.0, 'c_c': 'relu4', 'd': False}, hparams.values())
    self.assertEquals(12, hparams.aaa)
    self.assertEquals(3.0, hparams.b)
    self.assertEquals('relu4', hparams.c_c)

    json_str = hparams.to_json()
    hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
    hparams2.parse_json(json_str)
    self.assertEquals(12, hparams2.aaa)
    self.assertEquals(3.0, hparams2.b)
    self.assertEquals('relu4', hparams2.c_c)
    self.assertEquals(False, hparams2.d)

  def testNonProtoFails(self):
    with self.assertRaisesRegexp(AssertionError, ''):
      hparam.HParams(hparam_def=1)
    with self.assertRaisesRegexp(AssertionError, ''):
      hparam.HParams(hparam_def=1.0)
    with self.assertRaisesRegexp(AssertionError, ''):
      hparam.HParams(hparam_def='hello')
    with self.assertRaisesRegexp(AssertionError, ''):
      hparam.HParams(hparam_def=[1, 2, 3])


if __name__ == '__main__':
  test.main()
| 40.372449 | 80 | 0.65841 | 6,923 | 0.874889 | 0 | 0 | 0 | 0 | 0 | 0 | 2,086 | 0.263617 |
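A minimal usage sketch of the HParams API exercised in the file above (the hyperparameter names here are hypothetical; it assumes the same legacy tf.contrib.training import path used by the test): define defaults, override a subset from a comma-separated string, and round-trip the values through JSON.

from tensorflow.contrib.training.python.training import hparam

# Defaults, then command-line style overrides of selected values.
hparams = hparam.HParams(learning_rate=0.1, num_layers=2, activation='relu6')
hparams.parse('learning_rate=0.05,num_layers=3')

# Serialize to JSON and restore into a fresh HParams object.
json_str = hparams.to_json()
restored = hparam.HParams(learning_rate=0.0, num_layers=0, activation='')
restored.parse_json(json_str)
assert restored.num_layers == 3 and restored.activation == 'relu6'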