# ---- sweetwaterschools.org/boundaries/bundle.py  (repo: sdrdl/sdrdl-ambry-bundles) ----
'''
Ambry build bundle for the sweetwaterschools.org school boundaries dataset.
'''
from ambry.bundle.loader import GeoBuildBundle
class Bundle(GeoBuildBundle):
''' '''
def build(self):
super(Bundle, self).build()
self.build_acs_crosswalk()
return True
def build_acs_crosswalk(self):
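        """Build the ACS crosswalk partition: spatially match 2012 ACS block
        group centroids/bounding boxes against the school boundary polygons
        and write one (gvid, sws_boundaries_id) row per match into the
        ``acs_cross`` partition."""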
from ambry.geo.util import find_geo_containment
def generate_geometries():
blocks = self.partitions.find(table='sws_boundaries')
lr = self.init_log_rate(3000)
# Note, ogc_fid is the primary key. The id column is created by the shapefile.
for i,block in enumerate(blocks.query("SELECT AsText(geometry) AS wkt, id FROM sws_boundaries")):
lr('Load rtree')
if self.run_args.test and i > 200:
break
yield block['id'],block['id'], block['wkt']
def generate_blockgroups():
"""Generate centroids of the 2012 ACS blockgroups"""
block_groups = self.library.dep('bg2012').partition
for row in block_groups.query("""SELECT
gvid,
X(Transform(Centroid(geometry), 4326)) AS lon,
Y(Transform(Centroid(geometry), 4326)) as lat,
MbrMinX(geometry) AS x_min,
MbrMinY(geometry) AS y_min,
MbrMaxX(geometry) AS x_max,
MbrMaxY(geometry) AS y_max
FROM blockgroups
"""):
if row['lon'] and row['lat']:
yield (row['x_min'], row['y_min'], row['x_max'], row['y_max']), row['gvid']
def mark_contains():
p = self.partitions.find_or_new(table='acs_cross')
p.clean()
with p.inserter() as ins:
while True:
(p,point_obj,geometry, poly_obj) = yield # Get a value back from find_geo_containment
d = {
'gvid': point_obj,
'sws_boundaries_id': poly_obj
}
ins.insert(d)
self.log("Linking ACS tracts to boundaries")
        find_geo_containment(generate_geometries(), generate_blockgroups(), mark_contains(), method = 'intersects')
# ---- mapproxy/source/wms.py  (repo: autra/mapproxy) ----
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Retrieve maps/information from WMS servers.
"""
import sys
from mapproxy.request.base import split_mime_type
from mapproxy.cache.legend import Legend, legend_identifier
from mapproxy.image import make_transparent, ImageSource, SubImageSource, bbox_position_in_image
from mapproxy.image.merge import concat_legends
from mapproxy.image.transform import ImageTransformer
from mapproxy.layer import MapExtent, DefaultMapExtent, BlankImage, LegendQuery, MapQuery, MapLayer
from mapproxy.source import InfoSource, SourceError, LegendSource
from mapproxy.client.http import HTTPClientError
from mapproxy.util.py import reraise_exception
import logging
log = logging.getLogger('mapproxy.source.wms')
class WMSSource(MapLayer):
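    """Map layer that renders by forwarding map requests to a remote WMS server."""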
supports_meta_tiles = True
def __init__(self, client, image_opts=None, coverage=None, res_range=None,
transparent_color=None, transparent_color_tolerance=None,
supported_srs=None, supported_formats=None, fwd_req_params=None,
error_handler=None):
MapLayer.__init__(self, image_opts=image_opts)
self.client = client
self.supported_srs = supported_srs or []
self.supported_formats = supported_formats or []
self.fwd_req_params = fwd_req_params or set()
self.transparent_color = transparent_color
self.transparent_color_tolerance = transparent_color_tolerance
if self.transparent_color:
self.image_opts.transparent = True
self.coverage = coverage
self.res_range = res_range
if self.coverage:
self.extent = MapExtent(self.coverage.bbox, self.coverage.srs)
else:
self.extent = DefaultMapExtent()
self.error_handler = error_handler
def is_opaque(self, query):
"""
Returns true if we are sure that the image is not transparent.
"""
if self.res_range and not self.res_range.contains(query.bbox, query.size,
query.srs):
return False
if self.image_opts.transparent:
return False
if self.opacity is not None and (0.0 < self.opacity < 0.99):
return False
if not self.coverage:
# not transparent and no coverage
return True
if self.coverage.contains(query.bbox, query.srs):
# not transparent and completely inside coverage
return True
return False
def get_map(self, query):
if self.res_range and not self.res_range.contains(query.bbox, query.size,
query.srs):
raise BlankImage()
if self.coverage and not self.coverage.intersects(query.bbox, query.srs):
raise BlankImage()
try:
resp = self._get_map(query)
if self.transparent_color:
resp = make_transparent(resp, self.transparent_color,
self.transparent_color_tolerance)
resp.opacity = self.opacity
return resp
except HTTPClientError as e:
if self.error_handler:
resp = self.error_handler.handle(e.response_code, query)
if resp:
return resp
log.warning('could not retrieve WMS map: %s', e.full_msg or e)
reraise_exception(SourceError(e.args[0]), sys.exc_info())
def _get_map(self, query):
format = self.image_opts.format
if not format:
format = query.format
if self.supported_formats and format not in self.supported_formats:
format = self.supported_formats[0]
if self.supported_srs:
# srs can be equal while still having a different srs_code (EPSG:3857/900913), make sure to use a supported srs_code
request_srs = None
for srs in self.supported_srs:
if query.srs == srs:
request_srs = srs
break
if request_srs is None:
return self._get_transformed(query, format)
if query.srs.srs_code != request_srs.srs_code:
query.srs = request_srs
if self.extent and not self.extent.contains(MapExtent(query.bbox, query.srs)):
return self._get_sub_query(query, format)
resp = self.client.retrieve(query, format)
return ImageSource(resp, size=query.size, image_opts=self.image_opts)
def _get_sub_query(self, query, format):
size, offset, bbox = bbox_position_in_image(query.bbox, query.size, self.extent.bbox_for(query.srs))
if size[0] == 0 or size[1] == 0:
raise BlankImage()
src_query = MapQuery(bbox, size, query.srs, format, dimensions=query.dimensions)
resp = self.client.retrieve(src_query, format)
return SubImageSource(resp, size=query.size, offset=offset, image_opts=self.image_opts)
def _get_transformed(self, query, format):
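        """Request the map from the WMS in a supported SRS and reproject the
        result to the SRS and bbox asked for in ``query``."""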
dst_srs = query.srs
src_srs = self.supported_srs.best_srs(dst_srs)
dst_bbox = query.bbox
src_bbox = dst_srs.transform_bbox_to(src_srs, dst_bbox)
src_width, src_height = src_bbox[2]-src_bbox[0], src_bbox[3]-src_bbox[1]
ratio = src_width/src_height
dst_size = query.size
xres, yres = src_width/dst_size[0], src_height/dst_size[1]
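        # Choose the source image size so that both axes use the finer of the
        # two per-axis resolutions while keeping the source bbox aspect ratio.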
if xres < yres:
src_size = dst_size[0], int(dst_size[0]/ratio + 0.5)
else:
src_size = int(dst_size[1]*ratio +0.5), dst_size[1]
src_query = MapQuery(src_bbox, src_size, src_srs, format, dimensions=query.dimensions)
if self.coverage and not self.coverage.contains(src_bbox, src_srs):
img = self._get_sub_query(src_query, format)
else:
resp = self.client.retrieve(src_query, format)
img = ImageSource(resp, size=src_size, image_opts=self.image_opts)
img = ImageTransformer(src_srs, dst_srs).transform(img, src_bbox,
query.size, dst_bbox, self.image_opts)
img.format = format
return img
def _is_compatible(self, other, query):
if not isinstance(other, WMSSource):
return False
if self.opacity is not None or other.opacity is not None:
return False
if self.supported_srs != other.supported_srs:
return False
if self.supported_formats != other.supported_formats:
return False
if self.transparent_color != other.transparent_color:
return False
if self.transparent_color_tolerance != other.transparent_color_tolerance:
return False
if self.coverage != other.coverage:
return False
if (query.dimensions_for_params(self.fwd_req_params) !=
query.dimensions_for_params(other.fwd_req_params)):
return False
return True
def combined_layer(self, other, query):
if not self._is_compatible(other, query):
return None
client = self.client.combined_client(other.client, query)
if not client:
return None
return WMSSource(client, image_opts=self.image_opts,
transparent_color=self.transparent_color,
transparent_color_tolerance=self.transparent_color_tolerance,
supported_srs=self.supported_srs,
supported_formats=self.supported_formats,
res_range=None, # layer outside res_range should already be filtered out
coverage=self.coverage,
fwd_req_params=self.fwd_req_params,
)
class WMSInfoSource(InfoSource):
def __init__(self, client, fi_transformer=None, coverage=None):
self.client = client
self.fi_transformer = fi_transformer
self.coverage = coverage
def get_info(self, query):
if self.coverage and not self.coverage.contains(query.coord, query.srs):
return None
doc = self.client.get_info(query)
if self.fi_transformer:
doc = self.fi_transformer(doc)
return doc
class WMSLegendSource(LegendSource):
def __init__(self, clients, legend_cache, static=False):
self.clients = clients
self.identifier = legend_identifier([c.identifier for c in self.clients])
self._cache = legend_cache
self._size = None
self.static = static
@property
def size(self):
if not self._size:
legend = self.get_legend(LegendQuery(format='image/png', scale=None))
# TODO image size without as_image?
self._size = legend.as_image().size
return self._size
def get_legend(self, query):
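        """Return the combined legend image for all configured WMS clients,
        caching the merged result unless one of the upstream requests failed."""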
if self.static:
# prevent caching of static legends for different scales
legend = Legend(id=self.identifier, scale=None)
else:
legend = Legend(id=self.identifier, scale=query.scale)
if not self._cache.load(legend):
legends = []
error_occured = False
for client in self.clients:
try:
legends.append(client.get_legend(query))
except HTTPClientError as e:
error_occured = True
log.error(e.args[0])
except SourceError as e:
error_occured = True
# TODO errors?
log.error(e.args[0])
format = split_mime_type(query.format)[1]
legend = Legend(source=concat_legends(legends, format=format),
id=self.identifier, scale=query.scale)
if not error_occured:
self._cache.store(legend)
        return legend.source
# ---- atlas/testing/stageless_acceptance/fixtures/stageless_project/driver.py  (repo: manesioz/atlas) ----
import foundations
from foundations import set_tag
from foundations_contrib.global_state import current_foundations_context
from model import *
set_tag('model', 'cnn')
def print_words():
print(f'Job \'{current_foundations_context().job_id()}\' deployed')
print('Hello World!')
print_words()
addition_result = add(82,2)
set_tag('Loss', addition_result)
subtraction_result = subtract(44,2)
foundations.log_metric('Accuracy', subtraction_result)
# ---- capablerobot_usbhub/device.py  (repo: d-c-d/CapableRobot_USBHub_Driver) ----
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME> for Capable Robot Components
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import struct
import logging
import weakref
import usb.core  # pyusb; needed for the usb.core.USBError handling below
from .i2c import USBHubI2C
from .spi import USBHubSPI
from .gpio import USBHubGPIO
from .power import USBHubPower
from .config import USBHubConfig
from .util import *
EEPROM_I2C_ADDR = 0x50
EEPROM_EUI_ADDR = 0xFA
EEPROM_EUI_BYTES = 0xFF - 0xFA + 1
EEPROM_SKU_ADDR = 0x00
EEPROM_SKU_BYTES = 0x06
MCP_I2C_ADDR = 0x20
MCP_REG_GPIO = 0x09
class USBHubDevice:
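    """Driver-side handle for a single Capable Robot USB hub.

    Wraps register reads/writes over USB control transfers and exposes the
    I2C, SPI, GPIO, power and config helpers built on top of them.
    Illustrative usage (the ``main`` object and USB ``handle`` are created
    elsewhere in this package, not in this file)::

        device = USBHubDevice(main, handle)
        raw, parsed = device.register_read(name='port::connection')
        device.data_disable(ports=[1, 2])
    """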
CMD_REG_WRITE = 0x03
CMD_REG_READ = 0x04
REG_BASE_DFT = 0xBF800000
REG_BASE_ALT = 0xBFD20000
def __init__(self, main, handle,
timeout = 100,
i2c_attempts_max = 5,
i2c_attempt_delay = 10,
disable_i2c = False
):
self.main = main
self.handle = handle
self._serial = None
self._sku = None
self._revision = None
proxy = weakref.proxy(self)
## Function to enable the I2C bus
##
## This is bound here so that passed-in parameters don't have
## to be saved as object parameters, and we can still enable later
## if we need to.
def enable_i2c():
self.i2c = USBHubI2C(proxy,
timeout = timeout,
attempts_max = i2c_attempts_max,
attempt_delay = i2c_attempt_delay
)
if disable_i2c:
            ## For now, don't enable the I2C bus, but save the function to do so to a lambda.
## This allows delayed enabling of the I2C bus if it is needed later
## (e.g. to turn port data on and off).
self.i2c = None
self.enable_i2c = lambda : enable_i2c()
else:
enable_i2c()
self.spi = USBHubSPI(proxy, timeout=timeout)
self.gpio = USBHubGPIO(proxy)
self.power = USBHubPower(proxy)
self.config = USBHubConfig(proxy)
logging.debug("Device class created")
logging.debug("Firmware version {} running on {}".format(self.config.version, self.config.circuitpython_version))
def register_read(self, name=None, addr=None, length=1, print=False, endian='big'):
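        """Read ``length`` bytes from a hub register, addressed either by
        ``name`` (resolved through the register map) or by raw ``addr``;
        returns the raw bytes plus the parsed register when a name is known."""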
if name != None:
addr, bits, endian = self.main.find_register_by_name(name)
length = int(bits / 8)
else:
try:
name = self.main.find_register_name_by_addr(addr)
except :
print = False
bits = length * 8
if addr == None:
            raise ValueError('Must specify a name or address')
## Need to offset the register address for USB access
address = addr + self.REG_BASE_DFT
## Split 32 bit register address into the 16 bit value & index fields
value = address & 0xFFFF
index = address >> 16
data = list(self.handle.ctrl_transfer(REQ_IN, self.CMD_REG_READ, value, index, length))
if length != len(data):
raise ValueError('Incorrect data length')
shift = 0
if bits == 8:
code = 'B'
elif bits == 16:
code = 'H'
elif bits == 24:
## There is no good way to extract a 3 byte number.
##
## So we tell pack it's a 4 byte number and shift all the data over 1 byte
## so it decodes correctly (as the register defn starts from the MSB)
code = 'L'
shift = 8
elif bits == 32:
code = 'L'
if name is None:
parsed = None
else:
num = bits_to_bytes(bits)
value = int_from_bytes(data, endian)
stream = struct.pack(">HB" + code, *[addr, num, value << shift])
parsed = self.main.parse_register(name, stream)
if print:
self.main.print_register(parsed)
logging.debug("{} [0x{}] read {} [{}]".format(name, hexstr(addr), length, " ".join(["0x"+hexstr(v) for v in data])))
return data, parsed
def register_write(self, name=None, addr=None, buf=[]):
if name != None:
            addr, _, _ = self.main.find_register_by_name(name)
if addr == None:
            raise ValueError('Must specify a name or address')
## Need to offset the register address for USB access
address = addr + self.REG_BASE_DFT
## Split 32 bit register address into the 16 bit value & index fields
value = address & 0xFFFF
index = address >> 16
try:
length = self.handle.ctrl_transfer(REQ_OUT, self.CMD_REG_WRITE, value, index, buf)
except usb.core.USBError:
raise OSError('Unable to write to register {}'.format(addr))
if length != len(buf):
raise OSError('Number of bytes written to bus was {}, expected {}'.format(length, len(buf)))
return length
def connections(self):
_, conn = self.register_read(name='port::connection')
return [conn.body[key] == 1 for key in register_keys(conn)]
def speeds(self):
_, speed = self.register_read(name='port::device_speed')
speeds = ['none', 'low', 'full', 'high']
return [speeds[speed.body[key]] for key in register_keys(speed)]
@property
def serial(self):
if self.i2c is None:
return None
if self._serial is None:
data = self.i2c.read_i2c_block_data(EEPROM_I2C_ADDR, EEPROM_EUI_ADDR, EEPROM_EUI_BYTES)
data = [char for char in data]
if len(data) == 6:
data = data[0:3] + [0xFF, 0xFE] + data[3:6]
self._serial = ''.join(["%0.2X" % v for v in data])
return self._serial
@property
def usb_path(self):
return "{}-{}".format(self.handle.bus, self.handle.address)
@property
def key(self):
if self.i2c is None:
return self.usb_path
return self.serial[-self.main.KEY_LENGTH:]
@property
def sku(self):
if self.i2c is None:
return None
if self._sku is None:
data = self.i2c.read_i2c_block_data(EEPROM_I2C_ADDR, EEPROM_SKU_ADDR, EEPROM_SKU_BYTES+1)
if data[0] == 0 or data[0] == 255:
## Prototype units didn't have the PCB SKU programmed into the EEPROM
## If EEPROM location is empty, we assume we're interacting with that hardware
self._sku = ['......', 0]
else:
## Cache the SKU and the revision stored in the EEPROM
self._sku = [
''.join([chr(char) for char in data[0:EEPROM_SKU_BYTES]]),
data[EEPROM_SKU_BYTES]
]
## Return just the SKU (not the revision value)
return self._sku[0]
@property
def mpn(self):
return self.sku
@property
def rev(self):
return self.revision
@property
def revision(self):
if self._revision is None:
## There was a hardware change between REV 1 and REV 2 which
            ## necessitates the host-side driver knowing of that change.
## Data should be correct in EEPROM, but the on-hub firmware
## puts hardware revision in this register with the format
## of [REV, 'C']. If 'C' is in the second byte, the first
## byte has valid hardware information.
data, _ = self.register_read(addr=0x3004, length=2)
if data[1] == ord('C'):
self._revision = data[0]
else:
## Firmware has not set the revision in this address, so fall back
                ## to pulling hardware revision from the on-device EEPROM
_ = self.sku
self._revision = self._sku[1]
return self._revision
def check_hardware_revision(self):
_ = self.sku
return self.revision == self._sku[1]
def _data_state(self):
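        """Return the raw data-line state byte: bits 7..4 map to ports 1..4,
        and a set bit means that port's USB data lines are switched off."""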
if self.config.version > 1:
return self.config.get("data_state")
if self.i2c is None:
self.enable_i2c()
return self.i2c.read_i2c_block_data(MCP_I2C_ADDR, MCP_REG_GPIO, 1)[0]
def data_state(self):
value = self._data_state()
return ["off" if get_bit(value, idx) else "on" for idx in [7,6,5,4]]
def data_enable(self, ports=[]):
value = self._data_state()
for port in ports:
value = clear_bit(value, 8-port)
if self.config.version > 1:
self.config.set("data_state", int(value))
else:
self.i2c.write_bytes(MCP_I2C_ADDR, bytes([MCP_REG_GPIO, int(value)]))
def data_disable(self, ports=[]):
value = self._data_state()
for port in ports:
value = set_bit(value, 8-port)
if self.config.version > 1:
self.config.set("data_state", int(value))
else:
            self.i2c.write_bytes(MCP_I2C_ADDR, bytes([MCP_REG_GPIO, int(value)]))
# ---- codes/django/curso-django/webplayground/webplayground/core/views.py  (repo: crisconru/snippetshell) ----
from django.views.generic.base import TemplateView
from django.shortcuts import render
class HomePageView(TemplateView):
template_name = "core/home.html"
def get(self, request, *args, **kwargs):
return render(request, self.template_name, {'title':"Mi Super Web Playground"})
class SamplePageView(TemplateView):
    template_name = "core/sample.html"
# ---- lib/JumpScale/lib/perftesttools/PerfTestToolsFactory.py  (repo: rudecs/jumpscale_core7) ----
from JumpScale import j
from NodeNas import NodeNas
from NodeHost import NodeHost
from NodeMonitor import NodeMonitor
from MonitorTools import *
from InfluxDumper import *
import os
class PerfTestToolsFactory(object):
"""
j.tools.perftesttools.getNodeMonitor("localhost",22)
make sure there is influxdb running on monitor node (root/root)
make sure there is redis running on monitor node with passwd as specified
for example script
call self.getScript()
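    illustrative example (the IP addresses and test name here are hypothetical):
        perf = j.tools.perftesttools
        perf.init("mytest", "10.0.0.10", 22, redispasswd="", sshkey="~/.ssh/id_rsa")
        nas = perf.getNodeNAS("10.0.0.20", nrdisks=4, fstype="xfs")
        monitor = perf.getNodeMonitor()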
"""
def __init__(self):
self.monitorNodeIp=None
self.monitorNodeSSHPort=None
self.redispasswd=""
self.nodes=[]
self.sshkey=None
def init(self, testname, monitorNodeIp, sshPort, redispasswd="", sshkey=None):
"""
sshkey can be path to key or the private key itself
the goal is you use ssh-agent & your keys pre-loaded, best not to manually work with keys !!!
"""
self.testname=testname
self.monitorNodeIp=monitorNodeIp
self.monitorNodeSSHPort=sshPort
        self.redispasswd=redispasswd
if j.system.fs.exists(path=sshkey):
sshkey=j.system.fs.fileGetContents(sshkey)
self.sshkey=sshkey
path="%s/.ssh/testevn"%os.environ["HOME"]
j.system.fs.writeFile(path,self.sshkey)
j.system.fs.chmod(path,0o600)
j.do.loadSSHKeys()
def getNodeNAS(self, ipaddr,sshport=22,nrdisks=0,fstype="xfs", role='',debugdisk="",name=""):
"""
@param debug when True it means we will use this for development purposes & not init & mount local disks
"""
n=NodeNas(ipaddr=ipaddr, sshport=sshport, nrdisks=nrdisks, fstype=fstype,debugdisk=debugdisk,name=name)
self.nodes.append(n)
return n
def getNodeHost(self, ipaddr,sshport=22,name=""):
n=NodeHost(ipaddr,sshport,name=name)
self.nodes.append(n)
return n
def getNodeBase(self, ipaddr,sshport=22,name=""):
n=NodeHost(ipaddr,sshport,name=name)
self.nodes.append(n)
return n
def getNodeMonitor(self,name=""):
n=NodeMonitor(self.monitorNodeIp,self.monitorNodeSSHPort,name=name)
self.nodes.append(n)
return n
def getExampleScript(self,path=None):
dirpath=j.system.fs.getDirName(os.path.realpath(__file__))
path2="%s/exampleScriptexampleScript"%dirpath
C=j.system.fs.fileGetContents(path2)
if path!=None:
j.system.fs.writeFile(filename=path,contents=C)
return C
def monitor(self):
"""
will do monitoring & send results to redis, env is used to get config parameters from
"""
nodename=os.environ["nodename"]
if nodename=="":
nodename=j.do.execute("hostname")[1].strip()
net=os.environ["net"]=='1'
disks=[item.strip() for item in os.environ["disks"].split(",") if item.strip()!=""]
cpu=os.environ["cpu"]=='1'
redis=j.clients.redis.getRedisClient(os.environ["redishost"], os.environ["redisport"])
m=MonitorTools(redis,nodename)
m.startMonitorLocal(disks,cpu,net)
def influxpump(self):
"""
will dump redis stats into influxdb & env is used to get config parameters from
influxdb is always on localhost & std login/passwd
"""
redis=j.clients.redis.getRedisClient(os.environ["redishost"], os.environ["redisport"])
d=InfluxDumper(os.environ["testname"],redis)
        d.start()
# ---- ecommerce/core/admin.py  (repo: berrondo/ecommerce) ----
from django.contrib import admin
from .models import Order, Product, OrderItem
class PickInline(admin.TabularInline):
model = OrderItem
extra = 1
class ProductModelAdmin(admin.ModelAdmin):
list_display = ['name', 'price']
class CartModelAdmin(admin.ModelAdmin):
inlines = (PickInline,)
admin.site.register(Order, CartModelAdmin)
admin.site.register(Product, ProductModelAdmin)
admin.site.register(OrderItem)
# ---- tests/test_amply.py  (repo: betterhours/pulp) ----
import pulp.amply as amply
from io import StringIO
import unittest
class AmplyTest(unittest.TestCase):
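    """Exercises the AMPL-style data parser bundled with PuLP (pulp.amply):
    scalar params, sets, matrices, slices, defaults and tabbing data."""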
def test_data(self):
result = amply.Amply("param T := 4;")['T']
assert result == 4
result = amply.Amply("param T := -4;")['T']
assert result == -4
result = amply.Amply("param T := 0.04;")['T']
assert result == 0.04
result = amply.Amply("param T := -0.04;")['T']
assert result == -0.04
def test_set(self):
result = amply.Amply("set month := Jan Feb Mar Apr;")['month']
assert result == ['Jan', 'Feb', 'Mar', 'Apr']
result = amply.Amply("set month Jan Feb Mar Apr;")['month']
assert result == ['Jan', 'Feb', 'Mar', 'Apr']
assert [i for i in result] == ['Jan', 'Feb', 'Mar', 'Apr']
assert result != []
assert 'Jan' in result
assert 'Foo' not in result
assert len(result) == 4
def test_param(self):
result = amply.Amply("param T := 4;")['T']
assert result != [4]
result = amply.Amply("param T{foo}; param T := 1 2;")['T']
assert not (result == 2)
assert (result != 2)
def test_attr_access(self):
result = amply.Amply("param T:= 4;").T
assert result == 4
def test_from_file(self):
try:
s = StringIO("param T:= 4;")
except TypeError:
s = StringIO(u"param T:= 4;")
assert amply.Amply.from_file(s).T == 4
def test_load_string(self):
a = amply.Amply("param T:= 4; param X{foo};")
a.load_string("param S := 6; param X := 1 2;")
assert a.T == 4
assert a.S == 6
assert a.X[1] == 2
def test_load_file(self):
a = amply.Amply("param T:= 4; param X{foo};")
try:
s = StringIO("param S := 6; param X := 1 2;")
except TypeError:
s = StringIO(u"param S := 6; param X := 1 2;")
a.load_file(s)
assert a.T == 4
assert a.S == 6
assert a.X[1] == 2
def test_empty_init(self):
a = amply.Amply()
a.load_string("param T := 4;")
assert a.T == 4
def test_set_dimen2(self):
result = amply.Amply(
"""
set twotups dimen 2;
set twotups := (1, 2) (2, 3) (4, 2) (3, 1);
"""
)['twotups']
assert result == [(1, 2), (2, 3), (4, 2), (3, 1)]
def test_set_dimen_error(self):
a = """
set dim1 dimen 1;
set dim1 := (1, 2) (2, 3) (3, 2);
"""
self.assertRaises(amply.AmplyError, lambda: amply.Amply(a))
def test_set_dimen2_noparen(self):
result = amply.Amply(
"""
set twotups dimen 2;
set twotups := 1 2 2 3 4 2 3 1;
"""
)['twotups']
assert result == [(1, 2), (2, 3), (4, 2), (3, 1)]
def test_set_subscript(self):
result = amply.Amply(
"""
set days{months};
set days[Jan] := 1 2 3 4;
set days[Feb] := 5 6 7 8;
"""
)['days']
j = result['Jan']
assert j == [1, 2, 3, 4]
f = result['Feb']
assert f == [5, 6, 7, 8]
def test_set_subscript2(self):
result = amply.Amply(
"""
set days{months, days};
set days[Jan, 3] := 1 2 3 4;
set days[Feb, 'Ham '] := 5 6 7 8;
"""
)['days']
j = result['Jan'][3]
assert j == [1, 2, 3, 4]
f = result['Feb']['Ham ']
assert f == [5, 6, 7, 8]
def test_set_subscript2_tuples(self):
result = amply.Amply(
"""
set days{months, days};
set days[Jan, 3] := 1 2 3 4;
set days[Feb, 'Ham '] := 5 6 7 8;
"""
)['days']
j = result['Jan', 3]
assert j == [1, 2, 3, 4]
f = result['Feb', 'Ham ']
assert f == [5, 6, 7, 8]
def test_set_matrix(self):
result = amply.Amply(
"""
set A : 1 2 3 :=
1 + - -
2 + + -
3 - + -
;
"""
)
a = result.A
assert a == [(1, 1), (2, 1), (2, 2), (3, 2)]
def test_set_matrix_tr(self):
result = amply.Amply(
"""
set A (tr) : 1 2 3 :=
1 + - -
2 + + -
3 - + -
;
"""
)
a = result.A
assert a == [(1, 1), (1, 2), (2, 2), (2, 3)]
def test_set_splice(self):
result = amply.Amply(
"""
set A dimen 3;
set A := (1, 2, 3), (1, 1, *) 2 4 (3, *, *) 1 1;
"""
)
a = result.A
assert a == [(1, 2, 3), (1, 1, 2), (1, 1, 4), (3, 1, 1)]
def test_set_splice_matrix(self):
result = amply.Amply(
"""
set A dimen 3;
set A (1, *, *) : 1 2 3 :=
1 + - -
2 + - +
3 - - -
(2, *, *) : 1 2 3 :=
1 + - +
2 - + -
3 - - +
;
"""
)
a = result.A
assert a == [(1,1,1),(1,2,1),(1,2,3),(2,1,1),(2,1,3),(2,2,2),
(2,3,3)]
def test_simple_params(self):
result = amply.Amply("param T := 4;")['T']
assert result == 4
def test_sub1_params(self):
result = amply.Amply(
"""
param foo {s};
param foo := 1 Jan 2 Feb 3 Mar;
"""
)
j = result['foo'][1]
assert j == 'Jan'
f = result['foo'][2]
assert f == 'Feb'
def test_sub1_param_error(self):
a = """
param foo{s};
param foo := 1 Jan 2 Feb 3;
"""
self.assertRaises(amply.AmplyError, lambda: amply.Amply(a))
def test_param_default(self):
result = amply.Amply(
"""
param foo {s} default 3;
param foo := Jan 1 Feb 2 Mar 3;
"""
)
options = [('Jan', 1),
('Mar', 3),
('FOO', 3)
]
for name, value in options:
self.assertEqual(result['foo'][name], value)
def test_param_undefined(self):
result = amply.Amply(
"""
param foo {s} ;
param foo := Jan 1 Feb 2 Mar 3;
"""
)
j = result['foo']['Jan']
assert j == 1
with self.assertRaises(KeyError):
a = result['foo']['Apr']
def test_sub2_params(self):
result = amply.Amply(
"""
param foo {s, t};
param foo := 1 2 Hi 99 3 4;
"""
)
h = result['foo'][1][2]
assert h == 'Hi'
f = result['foo'][99][3]
assert f == 4
def test_2d_param(self):
result = amply.Amply(
"""
param demand {item, location};
param demand
: FRA DET LAN :=
spoons 200 100 30
plates 30 120 90
cups 666 13 29 ;
"""
)['demand']
options = [('spoons', { 'FRA': 200, 'DET': 100, 'LAN': 30 }),
('plates', { 'FRA': 30, 'DET': 120, 'LAN': 90 }),
('cups', { 'FRA': 666, 'DET': 13, 'LAN': 29 })
]
for name, _dict in options:
self.assertDictEqual(result[name], _dict)
def test_2d_numeric_param(self):
result = amply.Amply(
"""
param square {x, y};
param square : 1 2 :=
4 4 8
3 3 6
;
"""
)['square']
f = result[4, 1]
assert f == 4
assert result[4, 2] == 8
assert result[3, 1] == 3
assert result[3, 2] == 6
def test_2d_param_defaults(self):
result = amply.Amply(
"""
param demand {item, location};
param demand default 42
: FRA DET LAN :=
spoons 200 . 30
plates 30 120 .
cups . . 29 ;
"""
)['demand']
options = [('spoons', { 'FRA': 200, 'DET': 42, 'LAN': 30 }),
('plates', { 'FRA': 30, 'DET': 120, 'LAN': 42 }),
('cups', { 'FRA': 42, 'DET': 42, 'LAN': 29 })
]
for name, _dict in options:
self.assertDictEqual(result[name], _dict)
def test_2tables(self):
result = amply.Amply(
"""
param demand {item, location};
param demand default 42
: FRA DET LAN :=
spoons 200 . 30
plates 30 120 .
cups . . 29
;
param square {foo, foo};
param square
: A B :=
A 1 6
B 6 36
;
"""
)
demand = result['demand']
options = [('spoons', { 'FRA': 200, 'DET': 42, 'LAN': 30 }),
('plates', { 'FRA': 30, 'DET': 120, 'LAN': 42 }),
('cups', { 'FRA': 42, 'DET': 42, 'LAN': 29 })
]
for name, _dict in options:
self.assertDictEqual(demand[name], _dict)
square = result['square']
options = [('A', {'A': 1, 'B': 6}),
('B', {'A': 6, 'B': 36}),
]
for name, _dict in options:
self.assertDictEqual(square[name], _dict)
def test_2d_param_transpose(self):
result = amply.Amply(
"""
param demand {location, item};
param demand default 42 (tr)
: FRA DET LAN :=
spoons 200 . 30
plates 30 120 .
cups . . 29 ;
"""
)['demand']
self.assertEqual(result['FRA'], { 'spoons': 200, 'plates': 30, 'cups': 42 })
self.assertEqual(result['DET'], { 'spoons': 42, 'plates': 120, 'cups': 42 })
self.assertEqual(result['LAN'], { 'spoons': 30, 'plates': 42, 'cups': 29 })
def test_2d_slice1(self):
result = amply.Amply(
"""
param demand {location, item};
param demand :=
[Jan, *] Foo 1 Bar 2;
"""
)['demand']
f = result['Jan']['Foo']
assert f == 1
assert result['Jan']['Bar'] == 2
def test_3d_slice2(self):
result = amply.Amply(
"""
param trans_cost{src, dest, product};
param trans_cost :=
[*,*,bands]: FRA DET LAN :=
GARY 30 10 8
CLEV 22 7 10
[*,*,coils]: FRA DET LAN :=
GARY 39 14 11
CLEV 27 9 12
[*,*,plate]: FRA DET LAN :=
GARY 41 15 12
CLEV 29 9 13
;
"""
)['trans_cost']
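        # Each [*,*,product] slice pins the third subscript while the table rows/columns supply the first two.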
f = result['GARY']['FRA']['bands']
assert f == 30
assert result['GARY']['DET']['plate'] == 15
assert result['CLEV']['LAN']['coils'] == 12
def test_3d_slice2b(self):
result = amply.Amply(
"""
param trans_cost{src, product, dest};
param trans_cost :=
[*,bands,*]: FRA DET LAN :=
GARY 30 10 8
CLEV 22 7 10
[*,coils,*]: FRA DET LAN :=
GARY 39 14 11
CLEV 27 9 12
[*,plate,*]: FRA DET LAN :=
GARY 41 15 12
CLEV 29 9 13
;
"""
)['trans_cost']
f = result['GARY']['bands']['FRA']
assert f == 30
assert result['GARY']['plate']['DET'] == 15
assert result['CLEV']['coils']['LAN'] == 12
def test_single_tabbing_data(self):
result = amply.Amply(
"""
set elem;
param init_stock{elem};
param cost{elem};
param value{elem};
param : init_stock cost value :=
iron 7 25 1
nickel 35 3 2
;
"""
)
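        # A single tabbing statement fills several parameters (init_stock, cost, value) from one table.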
s = result['init_stock']
assert s == {'iron': 7, 'nickel': 35}
assert result['cost'] == {'iron': 25, 'nickel': 3}
assert result['value'] == {'iron': 1, 'nickel': 2}
def test_single_tabbing_data_with_set(self):
result = amply.Amply(
"""
set elem;
param init_stock{elem};
param cost{elem};
param value{elem};
param : elem : init_stock cost value :=
iron 7 25 1
nickel 35 3 2
;
"""
)
s = result['init_stock']
assert s == {'iron': 7, 'nickel': 35}
assert result['cost'] == {'iron': 25, 'nickel': 3}
assert result['value'] == {'iron': 1, 'nickel': 2}
def test_set2_tabbing(self):
result = amply.Amply(
"""
set elem dimen 2;
set elem := 0 0 1 1 2 2;
param cost{elem};
param value{elem};
param : cost value :=
0 0 7 25
1 1 35 3
;
"""
)
assert result['elem'] == [(0,0),(1,1),(2,2)]
def test_undefined_tabbing_param(self):
a = """
param cost{elem};
param : cost value :=
0 1 2
3 4 5
;
"""
self.assertRaises(amply.AmplyError, lambda: amply.Amply(a))
def test_2dset_simpleparam(self):
result = amply.Amply(
"""
set elem dimen 2;
param foo{elem};
param foo :=
1 2 3
2 3 4
3 4 5
;
"""
)['foo']
f = result[1][2]
assert f == 3
assert result[2][3] == 4
assert result[3][4] == 5
def test_tuple_param(self):
result = amply.Amply(
"""
set elem dimen 2;
param foo{elem};
param foo :=
1 2 3
2 3 4
3 4 5
;
"""
)['foo']
f = result[1,2]
assert f == 3
assert result[2,3] == 4
assert result[3,4] == 5
def test_comment(self):
result = amply.Amply(
"""
# a comment
set elem dimen 2;
param foo{elem};
param foo :=
1 2 3
2 3 4
3 4 5
;
"""
)['foo']
f = result[1,2]
assert f == 3
assert result[2,3] == 4
assert result[3,4] == 5
if __name__ == '__main__':
unittest.main()
nixpkgs_review/tests/cli_mocks.py | turboMaCk/nixpkgs-review | 0 | 6630059 | <filename>nixpkgs_review/tests/cli_mocks.py<gh_stars>0
import os
from io import StringIO
from tempfile import TemporaryDirectory
from typing import Any, List, Optional, Tuple, Union
from unittest import TestCase
TEST_ROOT = os.path.dirname(os.path.realpath(__file__))
DEBUG = False
class IgnoreArgument:
def __repr__(self) -> str:
return "(ignored)"
def read_asset(asset: str) -> str:
with open(os.path.join(TEST_ROOT, "assets", asset)) as f:
return f.read()
class MockError(Exception):
pass
class MockCompletedProcess:
def __init__(self, stdout: Optional[Union[str, StringIO]] = None) -> None:
self.returncode = 0
self.stdout = stdout
class Mock:
def __init__(self, arg_spec: List[Tuple[Any, Any]]) -> None:
self.arg_spec_iterator = iter(arg_spec)
def __iter__(self) -> "Mock":
return self
def __call__(self, *args: Any, **kwargs: Any) -> Any:
expected_args, ret = next(self.arg_spec_iterator)
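        # Compare the actual command (args[0]) with the next expected spec; IgnoreArgument entries match any value.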
if DEBUG:
print(f"({expected_args}) -> {ret}")
if expected_args is IgnoreArgument:
return ret
if len(args[0]) == len(expected_args):
for (i, arg) in enumerate(expected_args):
if arg is IgnoreArgument:
args[0][i] = IgnoreArgument
if expected_args != args[0]:
raise MockError(f"expected {expected_args}\n got {args[0]}")
return ret
class CliTestCase(TestCase):
def setUp(self) -> None:
os.chdir(os.path.join(TEST_ROOT, "assets/nixpkgs"))
self.directory = TemporaryDirectory()
os.environ["HOME"] = self.directory.name
os.environ["GITHUB_TOKEN"] = "0000000000000000000000000000000000000000"
def tearDown(self) -> None:
self.directory.cleanup()
build_cmds = [
(
[
"nix",
"--experimental-features",
"nix-command",
"eval",
"--json",
"--impure",
"--expr",
IgnoreArgument,
],
# hack to make sure the path exists
MockCompletedProcess(
stdout=(
'{"pong3d": {"exists": true, "broken": false, "path": "%s", "drvPath": "%s"}}'
% (__file__, __file__)
)
),
),
(
[
"nix",
"--experimental-features",
"nix-command",
"build",
"--no-link",
"--keep-going",
"--option",
"build-use-sandbox",
"relaxed",
"-f",
IgnoreArgument,
"--builders",
"ssh://[email protected] aarch64-linux",
],
MockCompletedProcess(),
),
(
[
"nix",
"--experimental-features",
"nix-command",
"eval",
"--impure",
"--raw",
"--expr",
"builtins.currentSystem",
],
MockCompletedProcess(stdout="x86_64-linux"),
),
(["nix-store", "--verify-path", IgnoreArgument], MockCompletedProcess()),
(
["nix", "--experimental-features", "nix-command", "log", IgnoreArgument],
MockCompletedProcess(),
),
(["nix-shell", IgnoreArgument], MockCompletedProcess()),
(["git", "worktree", "prune"], MockCompletedProcess()),
]
| <filename>nixpkgs_review/tests/cli_mocks.py<gh_stars>0
import os
from io import StringIO
from tempfile import TemporaryDirectory
from typing import Any, List, Optional, Tuple, Union
from unittest import TestCase
TEST_ROOT = os.path.dirname(os.path.realpath(__file__))
DEBUG = False
class IgnoreArgument:
def __repr__(self) -> str:
return "(ignored)"
def read_asset(asset: str) -> str:
with open(os.path.join(TEST_ROOT, "assets", asset)) as f:
return f.read()
class MockError(Exception):
pass
class MockCompletedProcess:
def __init__(self, stdout: Optional[Union[str, StringIO]] = None) -> None:
self.returncode = 0
self.stdout = stdout
class Mock:
def __init__(self, arg_spec: List[Tuple[Any, Any]]) -> None:
self.arg_spec_iterator = iter(arg_spec)
def __iter__(self) -> "Mock":
return self
def __call__(self, *args: Any, **kwargs: Any) -> Any:
expected_args, ret = next(self.arg_spec_iterator)
if DEBUG:
print(f"({expected_args}) -> {ret}")
if expected_args is IgnoreArgument:
return ret
if len(args[0]) == len(expected_args):
for (i, arg) in enumerate(expected_args):
if arg is IgnoreArgument:
args[0][i] = IgnoreArgument
if expected_args != args[0]:
raise MockError(f"expected {expected_args}\n got {args[0]}")
return ret
class CliTestCase(TestCase):
def setUp(self) -> None:
os.chdir(os.path.join(TEST_ROOT, "assets/nixpkgs"))
self.directory = TemporaryDirectory()
os.environ["HOME"] = self.directory.name
os.environ["GITHUB_TOKEN"] = "0000000000000000000000000000000000000000"
def tearDown(self) -> None:
self.directory.cleanup()
build_cmds = [
(
[
"nix",
"--experimental-features",
"nix-command",
"eval",
"--json",
"--impure",
"--expr",
IgnoreArgument,
],
# hack to make sure the path exists
MockCompletedProcess(
stdout=(
'{"pong3d": {"exists": true, "broken": false, "path": "%s", "drvPath": "%s"}}'
% (__file__, __file__)
)
),
),
(
[
"nix",
"--experimental-features",
"nix-command",
"build",
"--no-link",
"--keep-going",
"--option",
"build-use-sandbox",
"relaxed",
"-f",
IgnoreArgument,
"--builders",
"ssh://[email protected] aarch64-linux",
],
MockCompletedProcess(),
),
(
[
"nix",
"--experimental-features",
"nix-command",
"eval",
"--impure",
"--raw",
"--expr",
"builtins.currentSystem",
],
MockCompletedProcess(stdout="x86_64-linux"),
),
(["nix-store", "--verify-path", IgnoreArgument], MockCompletedProcess()),
(
["nix", "--experimental-features", "nix-command", "log", IgnoreArgument],
MockCompletedProcess(),
),
(["nix-shell", IgnoreArgument], MockCompletedProcess()),
(["git", "worktree", "prune"], MockCompletedProcess()),
]
code/Flask/app.py | rexroth-unofficial-ita/cpu-monitoring-ctrlx | 1 | 6630060 | <filename>code/Flask/app.py
from flask import Flask, redirect, render_template, request, session, url_for, Response
from werkzeug.serving import run_simple
from werkzeug.middleware.dispatcher import DispatcherMiddleware
import psutil
import json
import platform
import time
import subprocess
import os
import sys
# Resolve the directory this script lives in so relative paths work regardless of the current working directory
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path+"/uploads")
app = Flask(__name__, static_url_path='')
app.config['APPLICATION_ROOT'] = '/cpumonitor'
os.chdir(dir_path)
# Routes
@app.route('/cpumonitor/')
def index():
return render_template('index.html')
@app.route('/cpumonitor/staticdata',methods=['GET', 'POST'])
def stream():
    return Response(getSystemInfo(), mimetype='application/json')
@app.route('/cpumonitor/usagedata',methods=['GET', 'POST'])
def data():
    return Response(getSystemUsageInfo(), mimetype='application/json')
def getSystemInfo():
try:
info={}
info['platform']=platform.system()
info['kernel']=platform.release()
info['platform-version']=platform.version()
info['architecture']=platform.machine()
info['phcpu']=psutil.cpu_count(logical=False)
info['vrcpu']=psutil.cpu_count()
info['processor']=platform.processor()
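        # Total memory is reported in bytes; divide by 1024**3 to display whole gigabytes.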
info['ram']=str(round(psutil.virtual_memory().total / (1024.0 **3)))+" GB"
return json.dumps(info)
except Exception as e:
print(e)
def getSystemUsageInfo():
try:
info={}
info['CPU']=psutil.cpu_percent()
info['RAM']=psutil.virtual_memory().percent
info['temp']=str(psutil.sensors_temperatures())
return json.dumps(info)
except Exception as e:
print(e)
if __name__ == '__main__':
app.run(host='0.0.0.0',debug = False, port=12121)
| <filename>code/Flask/app.py
from flask import Flask, redirect, render_template, request, session, url_for, Response
from werkzeug.serving import run_simple
from werkzeug.middleware.dispatcher import DispatcherMiddleware
import psutil
import json
import platform
import time
import subprocess
import os
import sys
# stdout is saved
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path+"/uploads")
app = Flask(__name__, static_url_path='')
app.config['APPLICATION_ROOT'] = '/cpumonitor'
os.chdir(dir_path)
# Uploads
@app.route('/cpumonitor/')
def index():
return render_template('index.html')
@app.route('/cpumonitor/staticdata',methods=['GET', 'POST'])
def stream():
return Response(getSystemInfo(), mimetype='json')
@app.route('/cpumonitor/usagedata',methods=['GET', 'POST'])
def data():
return Response(getSystemUsageInfo(), mimetype='json')
def getSystemInfo():
try:
info={}
info['platform']=platform.system()
info['kernel']=platform.release()
info['platform-version']=platform.version()
info['architecture']=platform.machine()
info['phcpu']=psutil.cpu_count(logical=False)
info['vrcpu']=psutil.cpu_count()
info['processor']=platform.processor()
info['ram']=str(round(psutil.virtual_memory().total / (1024.0 **3)))+" GB"
return json.dumps(info)
except Exception as e:
print(e)
def getSystemUsageInfo():
try:
info={}
info['CPU']=psutil.cpu_percent()
info['RAM']=psutil.virtual_memory().percent
info['temp']=str(psutil.sensors_temperatures())
return json.dumps(info)
except Exception as e:
print(e)
if __name__ == '__main__':
app.run(host='0.0.0.0',debug = False, port=12121)
allhub/repos/downloads.py | srinivasreddy/allhub | 2 | 6630061 | from allhub.response import Response
# TODO: Downloads API is deprecated.
# Write a deprecation warning.
class DownloadMixin:
def downloads(self, owner, repo):
url = "/repos/{owner}/{repo}/downloads".format(owner=owner, repo=repo)
self.response = Response(self.get(url), "Downloads")
return self.response.transform()
def download(self, owner, repo, download_id):
url = "/repos/{owner}/{repo}/downloads/{download_id}".format(
owner=owner, repo=repo, download_id=download_id
)
self.response = Response(self.get(url), "Downloads")
return self.response.transform()
def delete_download(self, owner, repo, download_id):
url = "/repos/{owner}/{repo}/downloads/{download_id}".format(
owner=owner, repo=repo, download_id=download_id
)
self.response = Response(self.delete(url), "")
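        # The GitHub API answers a successful deletion with "204 No Content".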
if self.response.status_code == 204:
return True
raise ValueError(
"delete_download(.....) returned:{status_code}, instead it should return 204.".format(
status_code=self.response.status_code
)
)
google-cloud-sdk/lib/surface/dns/managed_zones/create.py | bopopescu/searchparty | 0 | 6630062 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns managed-zone create command."""
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.dns import flags
from googlecloudsdk.command_lib.dns import util as command_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
"""Create a Cloud DNS managed-zone.
This command creates a Cloud DNS managed-zone.
## EXAMPLES
To create a managed-zone, run:
$ {command} my_zone --dns-name my.zone.com. --description "My zone!"
"""
@staticmethod
def Args(parser):
flags.GetDnsZoneArg(
'The name of the managed-zone to be created.').AddToParser(parser)
flags.GetManagedZonesDnsNameArg().AddToParser(parser)
flags.GetManagedZonesDescriptionArg(required=True).AddToParser(parser)
parser.display_info.AddCacheUpdater(flags.ManagedZoneCompleter)
def Collection(self):
return 'dns.managedZones'
def Run(self, args):
dns = apis.GetClientInstance('dns', 'v1')
messages = apis.GetMessagesModule('dns', 'v1')
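    # Resolve the zone argument into a full resource reference; the project defaults to the active gcloud project.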
zone_ref = resources.REGISTRY.Parse(
args.dns_zone,
params={
'project': properties.VALUES.core.project.GetOrFail,
},
collection='dns.managedZones')
zone = messages.ManagedZone(name=zone_ref.managedZone,
dnsName=util.AppendTrailingDot(args.dns_name),
description=args.description)
result = dns.managedZones.Create(
messages.DnsManagedZonesCreateRequest(managedZone=zone,
project=zone_ref.project))
log.CreatedResource(zone_ref)
return [result]
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(base.CreateCommand):
r"""Create a Cloud DNS managed-zone.
This command creates a Cloud DNS managed-zone.
## EXAMPLES
To create a managed-zone, run:
$ {command} my_zone --dns-name my.zone.com. --description "My zone!"
"""
UNUSED_DNSSEC_EXAMPLE = """
To create a managed-zone with DNSSEC, run:
$ {command} my_zone_2 --description "Signed Zone" \
--dns-name myzone.example \
--dnssec-state=on
"""
@staticmethod
def Args(parser):
flags.GetDnsZoneArg(
'The name of the managed-zone to be created.').AddToParser(parser)
flags.GetManagedZonesDnsNameArg().AddToParser(parser)
flags.GetManagedZonesDescriptionArg(required=True).AddToParser(parser)
flags.AddCommonManagedZonesDnssecArgs(parser)
parser.display_info.AddCacheUpdater(flags.ManagedZoneCompleter)
def Collection(self):
return 'dns.managedZones'
def Run(self, args):
dns = apis.GetClientInstance('dns', 'v2beta1')
messages = apis.GetMessagesModule('dns', 'v2beta1')
zone_ref = util.GetRegistry('v2beta1').Parse(
args.dns_zone,
params={
'project': properties.VALUES.core.project.GetOrFail,
},
collection='dns.managedZones')
dnssec_config = None
if args.dnssec_state is not None:
dnssec_config = command_util.ParseDnssecConfigArgs(args, messages)
else:
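      # DNSSEC tuning flags are only meaningful when --dnssec-state is set, so reject them otherwise.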
bad_args = ['denial_of_existence', 'ksk_algorithm', 'zsk_algorithm',
'ksk_key_length', 'zsk_key_length']
for bad_arg in bad_args:
if getattr(args, bad_arg, None) is not None:
raise exceptions.InvalidArgumentException(
bad_arg,
'DNSSEC must be enabled in order to use other DNSSEC arguments. '
'Please set --dnssec-state to "on" or "transfer".')
zone = messages.ManagedZone(name=zone_ref.managedZone,
dnsName=util.AppendTrailingDot(args.dns_name),
description=args.description,
dnssecConfig=dnssec_config)
result = dns.managedZones.Create(
messages.DnsManagedZonesCreateRequest(managedZone=zone,
project=zone_ref.project))
log.CreatedResource(zone_ref)
return [result]
tests/_files/tree_network_sim_expected_results/calc_temp_drop.py | rbv83/DHNx | 14 | 6630063 | # -*- coding: utf-8
"""
This script calculates the heat transfer at the consumer
for a simple tree network.
"""
import math
import os
import numpy as np
import pandas as pd
path_file = os.path.dirname(__file__)
path = os.path.abspath(os.path.join(path_file, os.pardir, os.pardir, os.pardir))
input_data = os.path.join(path, "examples", "simulation", "tree")
result_path = os.path.join(path_file, "sequences")
if not os.path.exists(result_path):
os.mkdir(result_path)
def read_data(input_value):
r"""
This function is reading the data of a csv with a name given as input value
"""
return pd.read_csv(os.path.join(input_data, input_value + ".csv"), index_col=0)
# Read input data for every csv component
pipes = read_data("pipes")
temp_drop = pd.read_csv(input_data + "/sequences/consumers-delta_temp_drop.csv")
mass_flow = pd.read_csv(input_data + "/sequences/consumers-mass_flow.csv")
# Constants for calculation
t_env = 10 # [°C]
t_prod_i = pd.DataFrame(data={"t_prod_i": 130 * np.ones(len(mass_flow))}) # [°C]
c = 4190 # [J/kg*K]
pi = math.pi
# Initialize variables of type dataframe (needed for later calculations)
U_spec, t_cons_i, t_cons_r, t_fork_r, Q_loss_i, Q_loss_r, Q_cons, Q_loss_glob = [
pd.DataFrame() for variable in range(8)
]
# Adjust mass flows and temp drop to a dataframe containing all data in correct order
# Get mass flows of all consumers
mass_flow_total = mass_flow.iloc[:, 1:]
# Rename the columns to pipes naming convention
mass_flow_total.columns = ["1", "2"]
# Calculate producer mass flow as sum of consumer mass flows
mass_flow_total["0"] = mass_flow_total["1"] + mass_flow_total["2"]
# Change order of columns for later calculation
mass_flow_total = mass_flow_total[["0", "1", "2"]]
# Get temperature drop of all consumers
temp_drop = temp_drop.iloc[:, 1:]
# Rename the columns to pipes naming convention
temp_drop = temp_drop.rename(columns={"0": "1", "1": "2"})
def calc_temp_heat_loss(t_in, pos):
r"""
This function calculates the pipe's outlet temperature
out of the inlet temperature due to heat losses
Parameters
----------
t_in : Series
Temperature entering the pipe
pos : int
Position of node
Returns
-------
t_out : Series
Temperature leaving the pipe
"""
t_out = t_env + (t_in - t_env) * np.exp(
-pipes["heat_transfer_coefficient"].iloc[pos]
* pi
* pipes["diameter"].iloc[pos]
/ 1000
* pipes["length"].iloc[pos]
/ (c * mass_flow_total[str(pos)])
)
return t_out
def calc_heat_loss(m, t_in, t_out):
r"""
This function calculates heat losses
Needs to be adapted in case heat capacity is not constant as assumed
Parameters
----------
m : Series
Mass flow [kg/s]
t_in : Series
Inlet temperature [K]
t_out : Series
Outlet temperature [K]
Returns
-------
Heat flow [W]
"""
return m * c * (t_in - t_out)
# Calculate inlet temperature at fork
t_fork_i = pd.DataFrame(data={"0": calc_temp_heat_loss(t_prod_i["t_prod_i"], 0)})
# Calculate heat loss at pipe from producer to fork
Q_loss_i["0"] = calc_heat_loss(
mass_flow_total["0"], t_prod_i["t_prod_i"], t_fork_i["0"]
)
for index in list(temp_drop):
# Calculate inlet temperature at consumers
t_cons_i[index] = calc_temp_heat_loss(t_fork_i["0"], int(index))
# Calculate return temperature at consumers
t_cons_r[index] = t_cons_i[index] - temp_drop[index]
# Calculate return temperature at fork
t_fork_r[index] = calc_temp_heat_loss(t_cons_r[index], int(index))
# Calculate heat losses at pipe from fork to consumer
Q_loss_i[index] = calc_heat_loss(
mass_flow_total[index], t_fork_i["0"], t_cons_i[index]
)
# Calculate heat losses at pipe from consumer to fork
Q_loss_r[index] = calc_heat_loss(
mass_flow_total[index], t_cons_r[index], t_fork_r[index]
)
# Calculate heat transfer at consumers with temperature drop
Q_cons[index] = calc_heat_loss(
mass_flow_total[index], t_cons_i[index], t_cons_r[index]
)
# Calculate temperature of mixture at fork return
# Note with these input values irrelevant
# because the temperatures coming from the consumers are the same.
# Needs to be adapted in case capacity is not constant as assumed
t_fork_r_mix = pd.DataFrame(
data={
"0": (
mass_flow_total["1"] * t_fork_r["1"] + mass_flow_total["2"] * t_fork_r["2"]
)
/ mass_flow_total["0"]
}
)
# Calculate return temperature at producer
t_prod_r = pd.DataFrame(data={"0": calc_temp_heat_loss(t_fork_r_mix["0"], int("0"))})
# Calculate inlet temperature of nodes
t_nodes_i = pd.DataFrame(
data={
"producers-0": t_prod_i["t_prod_i"],
"forks-0": t_fork_i["0"],
"consumers-0": t_cons_i["1"],
"consumers-1": t_cons_i["2"],
}
)
# Calculate return temperature of nodes
t_nodes_r = pd.DataFrame(
data={
"producers-0": t_prod_r["0"],
"forks-0": t_fork_r_mix["0"],
"consumers-0": t_cons_r["1"],
"consumers-1": t_cons_r["2"],
}
)
# Calculate heat loss at pipe from fork to producer
Q_loss_r["0"] = calc_heat_loss(mass_flow_total["0"], t_fork_r_mix["0"], t_prod_r["0"])
# Calculate total heat losses (inlet and return)
Q_loss = Q_loss_i + Q_loss_r
# Calculate global heat losses
Q_loss_glob = pd.DataFrame(data={"global_heat_losses": np.zeros(len(mass_flow_total))})
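# Sum the combined (inlet + return) losses of all three pipes to obtain the network-wide heat loss.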
for index, node in enumerate(mass_flow_total):
Q_loss_glob["global_heat_losses"] = (
Q_loss_glob["global_heat_losses"] + Q_loss[str(index)]
)
# Print results
def parameters():
r"""
Writes results in Dictionary
Returns
-------
parameter : dict
Dictionary with results
"""
param_dict = {
"Mass flow [kg/s]": mass_flow_total,
"Inlet temperature at producer T_prod_i [°C]": t_prod_i,
"Return temperature at producer T_prod_r [°C]": t_prod_r,
"Inlet temperature at fork T_fork_i [°C]": t_fork_i,
"Return temperature at fork T_fork_r [°C]": t_fork_r_mix,
"Inlet temperature at consumer T_c_i [°C]": t_cons_i,
"Return temperature at consumer T_c_out [°C]": t_cons_r,
"Inlet temperature nodes T_nodes_i [°C]": t_nodes_i,
"Return temperature nodes T_nodes_r [°C]": t_nodes_r,
"Heat losses Q_loss [W]": Q_loss,
"Global heat losses Q_loss_glob [W]": Q_loss_glob,
"Heat transfer at consumers Q [W]": Q_cons,
}
return param_dict
parameter = parameters()
def print_parameters():
r"""
Prints the parameters
"""
dash = "-" * 60
print("\n" + dash)
print("Results at producer (0), consumer 1 (1) and consumer 2 (2)")
print(dash)
for name, param in parameter.items():
print(name + "\n" + str(param) + "\n\n")
print(dash)
print_parameters()
# Save results to csv
for value, params in parameter.items():
params.insert(0, "snapshot", np.arange(len(mass_flow_total)))
result_name = [
"pipes-mass_flow.csv",
"nodes-temp_inlet.csv",
"nodes-temp_return.csv",
"pipes-heat_losses.csv",
"global-heat_losses.csv",
]
result_list = [list(parameter.keys())[0]] + list(parameter.keys())[7:11]
for index, value in enumerate(result_list):
parameter[value].to_csv(os.path.join(result_path, result_name[index]), index=False)
poky-dunfell/bitbake/lib/bb/ui/teamcity.py | lacie-life/YoctoPi | 14 | 6630064 | <reponame>lacie-life/YoctoPi
#
# TeamCity UI Implementation
#
# Implements a TeamCity frontend for the BitBake utility, via service messages.
# See https://www.jetbrains.com/help/teamcity/build-script-interaction-with-teamcity.html
#
# Based on ncurses.py and knotty.py, variously by <NAME> and <NAME>
#
# Copyright (C) 2006 Michael '<NAME>
# Copyright (C) 2006-2012 <NAME>
# Copyright (C) 2018-2020 Agilent Technologies, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Author: <NAME> <<EMAIL>>
from __future__ import division
import datetime
import logging
import math
import os
import re
import sys
import xmlrpc.client
from collections import deque
import bb
import bb.build
import bb.command
import bb.cooker
import bb.event
import bb.exceptions
import bb.runqueue
from bb.ui import uihelper
logger = logging.getLogger("BitBake")
class TeamCityUI:
def __init__(self):
self._block_stack = []
self._last_progress_state = None
@classmethod
def escape_service_value(cls, value):
"""
Escape a value for inclusion in a service message. TeamCity uses the vertical pipe character for escaping.
See: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-Escapedvalues
"""
return re.sub(r"(['|\[\]])", r"|\1", value).replace("\n", "|n").replace("\r", "|r")
@classmethod
def emit_service_message(cls, message_type, **kwargs):
print(cls.format_service_message(message_type, **kwargs), flush=True)
@classmethod
def format_service_message(cls, message_type, **kwargs):
payload = " ".join(["{0}='{1}'".format(k, cls.escape_service_value(v)) for k, v in kwargs.items()])
return "##teamcity[{0} {1}]".format(message_type, payload)
@classmethod
def emit_simple_service_message(cls, message_type, message):
print(cls.format_simple_service_message(message_type, message), flush=True)
@classmethod
def format_simple_service_message(cls, message_type, message):
return "##teamcity[{0} '{1}']".format(message_type, cls.escape_service_value(message))
@classmethod
def format_build_message(cls, text, status):
return cls.format_service_message("message", text=text, status=status)
def block_start(self, name):
self._block_stack.append(name)
self.emit_service_message("blockOpened", name=name)
def block_end(self):
if self._block_stack:
name = self._block_stack.pop()
self.emit_service_message("blockClosed", name=name)
def progress(self, message, percent, extra=None):
now = datetime.datetime.now()
percent = "{0: >3.0f}".format(percent)
report = False
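        # Throttle progressMessage output: re-emit only when the message or the percentage changes (with a short minimum interval), or at 0/100 %.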
if not self._last_progress_state \
or (self._last_progress_state[0] == message
and self._last_progress_state[1] != percent
and (now - self._last_progress_state[2]).microseconds >= 5000) \
or self._last_progress_state[0] != message:
report = True
self._last_progress_state = (message, percent, now)
        if report or percent.strip() in ["0", "100"]:
self.emit_simple_service_message("progressMessage", "{0}: {1}%{2}".format(message, percent, extra or ""))
class TeamcityLogFormatter(logging.Formatter):
def format(self, record):
details = ""
if hasattr(record, 'bb_exc_formatted'):
details = ''.join(record.bb_exc_formatted)
elif hasattr(record, 'bb_exc_info'):
etype, value, tb = record.bb_exc_info
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
details = ''.join(formatted)
if record.levelno in [bb.msg.BBLogFormatter.ERROR, bb.msg.BBLogFormatter.CRITICAL]:
# ERROR gets a separate errorDetails field
msg = TeamCityUI.format_service_message("message", text=record.getMessage(), status="ERROR",
errorDetails=details)
else:
payload = record.getMessage()
if details:
payload += "\n" + details
if record.levelno == bb.msg.BBLogFormatter.PLAIN:
msg = payload
elif record.levelno == bb.msg.BBLogFormatter.WARNING:
msg = TeamCityUI.format_service_message("message", text=payload, status="WARNING")
else:
msg = TeamCityUI.format_service_message("message", text=payload, status="NORMAL")
return msg
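# Only these event classes are forwarded by the server to this UI (see the setEventMask call in main()).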
_evt_list = ["bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.LogRecord",
"bb.build.TaskFailed", "bb.build.TaskBase", "bb.event.ParseStarted",
"bb.event.ParseProgress", "bb.event.ParseCompleted", "bb.event.CacheLoadStarted",
"bb.event.CacheLoadProgress", "bb.event.CacheLoadCompleted", "bb.command.CommandFailed",
"bb.command.CommandExit", "bb.command.CommandCompleted", "bb.cooker.CookerExit",
"bb.event.MultipleProviders", "bb.event.NoProvider", "bb.runqueue.sceneQueueTaskStarted",
"bb.runqueue.runQueueTaskStarted", "bb.runqueue.runQueueTaskFailed", "bb.runqueue.sceneQueueTaskFailed",
"bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent",
"bb.build.TaskProgress", "bb.event.ProcessStarted", "bb.event.ProcessProgress", "bb.event.ProcessFinished"]
def _log_settings_from_server(server):
# Get values of variables which control our output
includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
if error:
logger.error("Unable to get the value of BBINCLUDELOGS variable: %s" % error)
raise BaseException(error)
loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
if error:
logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error)
raise BaseException(error)
return includelogs, loglines
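# BBINCLUDELOGS decides whether the log of a failed task is echoed into the build
# output; BBINCLUDELOGS_LINES limits how many trailing log lines are kept (it is
# used as the maxlen of the deque in the TaskFailed handler below).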
def main(server, eventHandler, params):
params.updateToServer(server, os.environ.copy())
includelogs, loglines = _log_settings_from_server(server)
ui = TeamCityUI()
helper = uihelper.BBUIHelper()
console = logging.StreamHandler(sys.stdout)
errconsole = logging.StreamHandler(sys.stderr)
format = TeamcityLogFormatter()
if params.options.quiet == 0:
forcelevel = None
elif params.options.quiet > 2:
forcelevel = bb.msg.BBLogFormatter.ERROR
else:
forcelevel = bb.msg.BBLogFormatter.WARNING
console.setFormatter(format)
errconsole.setFormatter(format)
if not bb.msg.has_console_handler(logger):
logger.addHandler(console)
logger.addHandler(errconsole)
if params.options.remote_server and params.options.kill_server:
server.terminateServer()
return
if params.observe_only:
logger.error("Observe-only mode not supported in this UI")
return 1
llevel, debug_domains = bb.msg.constructLogOptions()
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
try:
params.updateFromServer(server)
cmdline = params.parseActions()
if not cmdline:
logger.error("No task given")
return 1
if 'msg' in cmdline and cmdline['msg']:
logger.error(cmdline['msg'])
return 1
cmdline = cmdline['action']
ret, error = server.runCommand(cmdline)
if error:
logger.error("{0}: {1}".format(cmdline, error))
return 1
elif not ret:
logger.error("Couldn't get default commandline: {0}".format(ret))
return 1
except xmlrpc.client.Fault as x:
logger.error("XMLRPC Fault getting commandline: {0}".format(x))
return 1
active_process_total = None
is_tasks_running = False
while True:
try:
event = eventHandler.waitEvent(0.25)
if not event:
continue
helper.eventHandler(event)
if isinstance(event, bb.build.TaskBase):
logger.info(event._message)
if isinstance(event, logging.LogRecord):
# Don't report sstate failures as errors, since Yocto will just run the tasks for real
if event.msg == "No suitable staging package found" or (event.msg.startswith(
"Fetcher failure: Unable to find file") and "downloadfilename" in event.msg and "sstate" in event.msg):
event.levelno = bb.msg.BBLogFormatter.WARNING
if event.taskpid != 0:
# For "normal" logging conditions, don't show note logs from tasks
# but do show them if the user has changed the default log level to
# include verbose/debug messages
if event.levelno <= bb.msg.BBLogFormatter.NOTE and (event.levelno < llevel or (
event.levelno == bb.msg.BBLogFormatter.NOTE and llevel != bb.msg.BBLogFormatter.VERBOSE)):
continue
# Prefix task messages with recipe/task
if event.taskpid in helper.running_tasks and event.levelno != bb.msg.BBLogFormatter.PLAIN:
taskinfo = helper.running_tasks[event.taskpid]
event.msg = taskinfo['title'] + ': ' + event.msg
if hasattr(event, 'fn'):
event.msg = event.fn + ': ' + event.msg
logger.handle(event)
if isinstance(event, bb.build.TaskFailedSilent):
logger.warning("Logfile for failed setscene task is %s" % event.logfile)
continue
if isinstance(event, bb.build.TaskFailed):
rt = "{0}-{1}:{2}".format(event.pn, event.pv.replace("AUTOINC", "0"), event.task)
logfile = event.logfile
if not logfile or not os.path.exists(logfile):
TeamCityUI.emit_service_message("buildProblem", description="{0}\nUnknown failure (no log file available)".format(rt))
if not event.task.endswith("_setscene"):
server.runCommand(["stateForceShutdown"])
continue
details = deque(maxlen=loglines)
error_lines = []
if includelogs and not event.errprinted:
with open(logfile, "r") as f:
while True:
line = f.readline()
if not line:
break
line = line.rstrip()
details.append(' | %s' % line)
# TODO: a less stupid check for errors
if (event.task == "do_compile") and ("error:" in line):
error_lines.append(line)
if error_lines:
TeamCityUI.emit_service_message("compilationStarted", compiler=rt)
for line in error_lines:
TeamCityUI.emit_service_message("message", text=line, status="ERROR")
TeamCityUI.emit_service_message("compilationFinished", compiler=rt)
else:
TeamCityUI.emit_service_message("buildProblem", description=rt)
err = "Logfile of failure stored in: %s" % logfile
if details:
ui.block_start("{0} task log".format(rt))
# TeamCity seems to choke on service messages longer than about 63800 characters, so if error
# details is longer than, say, 60000, batch it up into several messages.
first_message = True
while details:
detail_len = 0
batch = deque()
while details and detail_len < 60000:
# TODO: This code doesn't bother to handle lines that themselves are extremely long.
line = details.popleft()
batch.append(line)
detail_len += len(line)
if first_message:
batch.appendleft("Log data follows:")
first_message = False
TeamCityUI.emit_service_message("message", text=err, status="ERROR",
errorDetails="\n".join(batch))
else:
TeamCityUI.emit_service_message("message", text="[continued]", status="ERROR",
errorDetails="\n".join(batch))
ui.block_end()
else:
TeamCityUI.emit_service_message("message", text=err, status="ERROR", errorDetails="")
if not event.task.endswith("_setscene"):
server.runCommand(["stateForceShutdown"])
if isinstance(event, bb.event.ProcessStarted):
if event.processname in ["Initialising tasks", "Checking sstate mirror object availability"]:
active_process_total = event.total
ui.block_start(event.processname)
if isinstance(event, bb.event.ProcessFinished):
if event.processname in ["Initialising tasks", "Checking sstate mirror object availability"]:
ui.progress(event.processname, 100)
ui.block_end()
if isinstance(event, bb.event.ProcessProgress):
if event.processname in ["Initialising tasks",
"Checking sstate mirror object availability"] and active_process_total != 0:
ui.progress(event.processname, event.progress * 100 / active_process_total)
if isinstance(event, bb.event.CacheLoadStarted):
ui.block_start("Loading cache")
if isinstance(event, bb.event.CacheLoadProgress):
if event.total != 0:
ui.progress("Loading cache", math.floor(event.current * 100 / event.total))
if isinstance(event, bb.event.CacheLoadCompleted):
ui.progress("Loading cache", 100)
ui.block_end()
if isinstance(event, bb.event.ParseStarted):
ui.block_start("Parsing recipes and checking upstream revisions")
if isinstance(event, bb.event.ParseProgress):
if event.total != 0:
ui.progress("Parsing recipes", math.floor(event.current * 100 / event.total))
if isinstance(event, bb.event.ParseCompleted):
ui.progress("Parsing recipes", 100)
ui.block_end()
if isinstance(event, bb.command.CommandCompleted):
return
if isinstance(event, bb.command.CommandFailed):
logger.error(str(event))
return 1
if isinstance(event, bb.event.MultipleProviders):
logger.warning(str(event))
continue
if isinstance(event, bb.event.NoProvider):
logger.error(str(event))
continue
if isinstance(event, bb.command.CommandExit):
return
if isinstance(event, bb.cooker.CookerExit):
return
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
if not is_tasks_running:
is_tasks_running = True
ui.block_start("Running tasks")
if event.stats.total != 0:
ui.progress("Running setscene tasks", (
event.stats.completed + event.stats.active + event.stats.failed + 1) * 100 / event.stats.total)
if isinstance(event, bb.runqueue.runQueueTaskStarted):
if not is_tasks_running:
is_tasks_running = True
ui.block_start("Running tasks")
if event.stats.total != 0:
pseudo_total = event.stats.total - event.stats.skipped
pseudo_complete = event.stats.completed + event.stats.active - event.stats.skipped + event.stats.failed + 1
# TODO: sometimes this gives over 100%
ui.progress("Running runqueue tasks", (pseudo_complete) * 100 / pseudo_total,
" ({0}/{1})".format(pseudo_complete, pseudo_total))
if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
logger.warning(str(event))
continue
if isinstance(event, bb.runqueue.runQueueTaskFailed):
logger.error(str(event))
return 1
if isinstance(event, bb.event.LogExecTTY):
pass
except EnvironmentError as ioerror:
# ignore interrupted io
if ioerror.args[0] == 4:
pass
except Exception as ex:
logger.error(str(ex))
# except KeyboardInterrupt:
# if shutdown == 2:
# mw.appendText("Third Keyboard Interrupt, exit.\n")
# exitflag = True
# if shutdown == 1:
# mw.appendText("Second Keyboard Interrupt, stopping...\n")
# _, error = server.runCommand(["stateForceShutdown"])
# if error:
# print("Unable to cleanly stop: %s" % error)
# if shutdown == 0:
# mw.appendText("Keyboard Interrupt, closing down...\n")
# _, error = server.runCommand(["stateShutdown"])
# if error:
# print("Unable to cleanly shutdown: %s" % error)
# shutdown = shutdown + 1
# pass
| #
# TeamCity UI Implementation
#
# Implements a TeamCity frontend for the BitBake utility, via service messages.
# See https://www.jetbrains.com/help/teamcity/build-script-interaction-with-teamcity.html
#
# Based on ncurses.py and knotty.py, variously by <NAME> and <NAME>
#
# Copyright (C) 2006 Michael '<NAME>
# Copyright (C) 2006-2012 <NAME>
# Copyright (C) 2018-2020 Agilent Technologies, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Author: <NAME> <<EMAIL>>
from __future__ import division
import datetime
import logging
import math
import os
import re
import sys
import xmlrpc.client
from collections import deque
import bb
import bb.build
import bb.command
import bb.cooker
import bb.event
import bb.exceptions
import bb.runqueue
from bb.ui import uihelper
logger = logging.getLogger("BitBake")
class TeamCityUI:
def __init__(self):
self._block_stack = []
self._last_progress_state = None
@classmethod
def escape_service_value(cls, value):
"""
Escape a value for inclusion in a service message. TeamCity uses the vertical pipe character for escaping.
See: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-Escapedvalues
"""
return re.sub(r"(['|\[\]])", r"|\1", value).replace("\n", "|n").replace("\r", "|r")
@classmethod
def emit_service_message(cls, message_type, **kwargs):
print(cls.format_service_message(message_type, **kwargs), flush=True)
@classmethod
def format_service_message(cls, message_type, **kwargs):
payload = " ".join(["{0}='{1}'".format(k, cls.escape_service_value(v)) for k, v in kwargs.items()])
return "##teamcity[{0} {1}]".format(message_type, payload)
@classmethod
def emit_simple_service_message(cls, message_type, message):
print(cls.format_simple_service_message(message_type, message), flush=True)
@classmethod
def format_simple_service_message(cls, message_type, message):
return "##teamcity[{0} '{1}']".format(message_type, cls.escape_service_value(message))
@classmethod
def format_build_message(cls, text, status):
return cls.format_service_message("message", text=text, status=status)
def block_start(self, name):
self._block_stack.append(name)
self.emit_service_message("blockOpened", name=name)
def block_end(self):
if self._block_stack:
name = self._block_stack.pop()
self.emit_service_message("blockClosed", name=name)
def progress(self, message, percent, extra=None):
now = datetime.datetime.now()
percent = "{0: >3.0f}".format(percent)
report = False
if not self._last_progress_state \
or (self._last_progress_state[0] == message
and self._last_progress_state[1] != percent
and (now - self._last_progress_state[2]).microseconds >= 5000) \
or self._last_progress_state[0] != message:
report = True
self._last_progress_state = (message, percent, now)
if report or percent in [0, 100]:
self.emit_simple_service_message("progressMessage", "{0}: {1}%{2}".format(message, percent, extra or ""))
class TeamcityLogFormatter(logging.Formatter):
def format(self, record):
details = ""
if hasattr(record, 'bb_exc_formatted'):
details = ''.join(record.bb_exc_formatted)
elif hasattr(record, 'bb_exc_info'):
etype, value, tb = record.bb_exc_info
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
details = ''.join(formatted)
if record.levelno in [bb.msg.BBLogFormatter.ERROR, bb.msg.BBLogFormatter.CRITICAL]:
# ERROR gets a separate errorDetails field
msg = TeamCityUI.format_service_message("message", text=record.getMessage(), status="ERROR",
errorDetails=details)
else:
payload = record.getMessage()
if details:
payload += "\n" + details
if record.levelno == bb.msg.BBLogFormatter.PLAIN:
msg = payload
elif record.levelno == bb.msg.BBLogFormatter.WARNING:
msg = TeamCityUI.format_service_message("message", text=payload, status="WARNING")
else:
msg = TeamCityUI.format_service_message("message", text=payload, status="NORMAL")
return msg
_evt_list = ["bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.LogRecord",
"bb.build.TaskFailed", "bb.build.TaskBase", "bb.event.ParseStarted",
"bb.event.ParseProgress", "bb.event.ParseCompleted", "bb.event.CacheLoadStarted",
"bb.event.CacheLoadProgress", "bb.event.CacheLoadCompleted", "bb.command.CommandFailed",
"bb.command.CommandExit", "bb.command.CommandCompleted", "bb.cooker.CookerExit",
"bb.event.MultipleProviders", "bb.event.NoProvider", "bb.runqueue.sceneQueueTaskStarted",
"bb.runqueue.runQueueTaskStarted", "bb.runqueue.runQueueTaskFailed", "bb.runqueue.sceneQueueTaskFailed",
"bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent",
"bb.build.TaskProgress", "bb.event.ProcessStarted", "bb.event.ProcessProgress", "bb.event.ProcessFinished"]
def _log_settings_from_server(server):
# Get values of variables which control our output
includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
if error:
logger.error("Unable to get the value of BBINCLUDELOGS variable: %s" % error)
raise BaseException(error)
loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
if error:
logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error)
raise BaseException(error)
return includelogs, loglines
def main(server, eventHandler, params):
params.updateToServer(server, os.environ.copy())
includelogs, loglines = _log_settings_from_server(server)
ui = TeamCityUI()
helper = uihelper.BBUIHelper()
console = logging.StreamHandler(sys.stdout)
errconsole = logging.StreamHandler(sys.stderr)
format = TeamcityLogFormatter()
if params.options.quiet == 0:
forcelevel = None
elif params.options.quiet > 2:
forcelevel = bb.msg.BBLogFormatter.ERROR
else:
forcelevel = bb.msg.BBLogFormatter.WARNING
console.setFormatter(format)
errconsole.setFormatter(format)
if not bb.msg.has_console_handler(logger):
logger.addHandler(console)
logger.addHandler(errconsole)
if params.options.remote_server and params.options.kill_server:
server.terminateServer()
return
if params.observe_only:
logger.error("Observe-only mode not supported in this UI")
return 1
llevel, debug_domains = bb.msg.constructLogOptions()
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
try:
params.updateFromServer(server)
cmdline = params.parseActions()
if not cmdline:
logger.error("No task given")
return 1
if 'msg' in cmdline and cmdline['msg']:
logger.error(cmdline['msg'])
return 1
cmdline = cmdline['action']
ret, error = server.runCommand(cmdline)
if error:
logger.error("{0}: {1}".format(cmdline, error))
return 1
elif not ret:
logger.error("Couldn't get default commandline: {0}".format(ret))
return 1
except xmlrpc.client.Fault as x:
logger.error("XMLRPC Fault getting commandline: {0}".format(x))
return 1
active_process_total = None
is_tasks_running = False
while True:
try:
event = eventHandler.waitEvent(0.25)
if not event:
continue
helper.eventHandler(event)
if isinstance(event, bb.build.TaskBase):
logger.info(event._message)
if isinstance(event, logging.LogRecord):
# Don't report sstate failures as errors, since Yocto will just run the tasks for real
if event.msg == "No suitable staging package found" or (event.msg.startswith(
"Fetcher failure: Unable to find file") and "downloadfilename" in event.msg and "sstate" in event.msg):
event.levelno = bb.msg.BBLogFormatter.WARNING
if event.taskpid != 0:
# For "normal" logging conditions, don't show note logs from tasks
# but do show them if the user has changed the default log level to
# include verbose/debug messages
if event.levelno <= bb.msg.BBLogFormatter.NOTE and (event.levelno < llevel or (
event.levelno == bb.msg.BBLogFormatter.NOTE and llevel != bb.msg.BBLogFormatter.VERBOSE)):
continue
# Prefix task messages with recipe/task
if event.taskpid in helper.running_tasks and event.levelno != bb.msg.BBLogFormatter.PLAIN:
taskinfo = helper.running_tasks[event.taskpid]
event.msg = taskinfo['title'] + ': ' + event.msg
if hasattr(event, 'fn'):
event.msg = event.fn + ': ' + event.msg
logger.handle(event)
if isinstance(event, bb.build.TaskFailedSilent):
logger.warning("Logfile for failed setscene task is %s" % event.logfile)
continue
if isinstance(event, bb.build.TaskFailed):
rt = "{0}-{1}:{2}".format(event.pn, event.pv.replace("AUTOINC", "0"), event.task)
logfile = event.logfile
if not logfile or not os.path.exists(logfile):
TeamCityUI.emit_service_message("buildProblem", description="{0}\nUnknown failure (no log file available)".format(rt))
if not event.task.endswith("_setscene"):
server.runCommand(["stateForceShutdown"])
continue
details = deque(maxlen=loglines)
error_lines = []
if includelogs and not event.errprinted:
with open(logfile, "r") as f:
while True:
line = f.readline()
if not line:
break
line = line.rstrip()
details.append(' | %s' % line)
# TODO: a less stupid check for errors
if (event.task == "do_compile") and ("error:" in line):
error_lines.append(line)
if error_lines:
TeamCityUI.emit_service_message("compilationStarted", compiler=rt)
for line in error_lines:
TeamCityUI.emit_service_message("message", text=line, status="ERROR")
TeamCityUI.emit_service_message("compilationFinished", compiler=rt)
else:
TeamCityUI.emit_service_message("buildProblem", description=rt)
err = "Logfile of failure stored in: %s" % logfile
if details:
ui.block_start("{0} task log".format(rt))
# TeamCity seems to choke on service messages longer than about 63800 characters, so if error
# details is longer than, say, 60000, batch it up into several messages.
first_message = True
while details:
detail_len = 0
batch = deque()
while details and detail_len < 60000:
# TODO: This code doesn't bother to handle lines that themselves are extremely long.
line = details.popleft()
batch.append(line)
detail_len += len(line)
if first_message:
batch.appendleft("Log data follows:")
first_message = False
TeamCityUI.emit_service_message("message", text=err, status="ERROR",
errorDetails="\n".join(batch))
else:
TeamCityUI.emit_service_message("message", text="[continued]", status="ERROR",
errorDetails="\n".join(batch))
ui.block_end()
else:
TeamCityUI.emit_service_message("message", text=err, status="ERROR", errorDetails="")
if not event.task.endswith("_setscene"):
server.runCommand(["stateForceShutdown"])
if isinstance(event, bb.event.ProcessStarted):
if event.processname in ["Initialising tasks", "Checking sstate mirror object availability"]:
active_process_total = event.total
ui.block_start(event.processname)
if isinstance(event, bb.event.ProcessFinished):
if event.processname in ["Initialising tasks", "Checking sstate mirror object availability"]:
ui.progress(event.processname, 100)
ui.block_end()
if isinstance(event, bb.event.ProcessProgress):
if event.processname in ["Initialising tasks",
"Checking sstate mirror object availability"] and active_process_total != 0:
ui.progress(event.processname, event.progress * 100 / active_process_total)
if isinstance(event, bb.event.CacheLoadStarted):
ui.block_start("Loading cache")
if isinstance(event, bb.event.CacheLoadProgress):
if event.total != 0:
ui.progress("Loading cache", math.floor(event.current * 100 / event.total))
if isinstance(event, bb.event.CacheLoadCompleted):
ui.progress("Loading cache", 100)
ui.block_end()
if isinstance(event, bb.event.ParseStarted):
ui.block_start("Parsing recipes and checking upstream revisions")
if isinstance(event, bb.event.ParseProgress):
if event.total != 0:
ui.progress("Parsing recipes", math.floor(event.current * 100 / event.total))
if isinstance(event, bb.event.ParseCompleted):
ui.progress("Parsing recipes", 100)
ui.block_end()
if isinstance(event, bb.command.CommandCompleted):
return
if isinstance(event, bb.command.CommandFailed):
logger.error(str(event))
return 1
if isinstance(event, bb.event.MultipleProviders):
logger.warning(str(event))
continue
if isinstance(event, bb.event.NoProvider):
logger.error(str(event))
continue
if isinstance(event, bb.command.CommandExit):
return
if isinstance(event, bb.cooker.CookerExit):
return
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
if not is_tasks_running:
is_tasks_running = True
ui.block_start("Running tasks")
if event.stats.total != 0:
ui.progress("Running setscene tasks", (
event.stats.completed + event.stats.active + event.stats.failed + 1) * 100 / event.stats.total)
if isinstance(event, bb.runqueue.runQueueTaskStarted):
if not is_tasks_running:
is_tasks_running = True
ui.block_start("Running tasks")
if event.stats.total != 0:
pseudo_total = event.stats.total - event.stats.skipped
pseudo_complete = event.stats.completed + event.stats.active - event.stats.skipped + event.stats.failed + 1
# TODO: sometimes this gives over 100%
ui.progress("Running runqueue tasks", (pseudo_complete) * 100 / pseudo_total,
" ({0}/{1})".format(pseudo_complete, pseudo_total))
if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
logger.warning(str(event))
continue
if isinstance(event, bb.runqueue.runQueueTaskFailed):
logger.error(str(event))
return 1
if isinstance(event, bb.event.LogExecTTY):
pass
except EnvironmentError as ioerror:
# ignore interrupted io
if ioerror.args[0] == 4:
pass
except Exception as ex:
logger.error(str(ex))
# except KeyboardInterrupt:
# if shutdown == 2:
# mw.appendText("Third Keyboard Interrupt, exit.\n")
# exitflag = True
# if shutdown == 1:
# mw.appendText("Second Keyboard Interrupt, stopping...\n")
# _, error = server.runCommand(["stateForceShutdown"])
# if error:
# print("Unable to cleanly stop: %s" % error)
# if shutdown == 0:
# mw.appendText("Keyboard Interrupt, closing down...\n")
# _, error = server.runCommand(["stateShutdown"])
# if error:
# print("Unable to cleanly shutdown: %s" % error)
# shutdown = shutdown + 1
# pass | en | 0.720618 | # # TeamCity UI Implementation # # Implements a TeamCity frontend for the BitBake utility, via service messages. # See https://www.jetbrains.com/help/teamcity/build-script-interaction-with-teamcity.html # # Based on ncurses.py and knotty.py, variously by <NAME> and <NAME> # # Copyright (C) 2006 Michael '<NAME> # Copyright (C) 2006-2012 <NAME> # Copyright (C) 2018-2020 Agilent Technologies, Inc. # # SPDX-License-Identifier: GPL-2.0-only # # Author: <NAME> <<EMAIL>> Escape a value for inclusion in a service message. TeamCity uses the vertical pipe character for escaping. See: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-Escapedvalues #teamcity[{0} {1}]".format(message_type, payload) #teamcity[{0} '{1}']".format(message_type, cls.escape_service_value(message)) # ERROR gets a separate errorDetails field # Get values of variables which control our output # Don't report sstate failures as errors, since Yocto will just run the tasks for real # For "normal" logging conditions, don't show note logs from tasks # but do show them if the user has changed the default log level to # include verbose/debug messages # Prefix task messages with recipe/task # TODO: a less stupid check for errors # TeamCity seems to choke on service messages longer than about 63800 characters, so if error # details is longer than, say, 60000, batch it up into several messages. # TODO: This code doesn't bother to handle lines that themselves are extremely long. # TODO: sometimes this gives over 100% # ignore interrupted io # except KeyboardInterrupt: # if shutdown == 2: # mw.appendText("Third Keyboard Interrupt, exit.\n") # exitflag = True # if shutdown == 1: # mw.appendText("Second Keyboard Interrupt, stopping...\n") # _, error = server.runCommand(["stateForceShutdown"]) # if error: # print("Unable to cleanly stop: %s" % error) # if shutdown == 0: # mw.appendText("Keyboard Interrupt, closing down...\n") # _, error = server.runCommand(["stateShutdown"]) # if error: # print("Unable to cleanly shutdown: %s" % error) # shutdown = shutdown + 1 # pass | 2.277225 | 2 |
source/marathon/src/phase2_contrast.py | hawkhai/blog | 1 | 6630065 | <reponame>hawkhai/blog
#encoding=utf8
import os, re, sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
from lib import kalgorithm
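# Compares two contrast-repair approaches on the same broken photo: histogram
# equalization versus histogram manipulation (both provided by the local
# kalgorithm helpers), saving and plotting the result of each.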
def main():
inputfile = r"./input_images/phase2/phase2_broken.jpg"
imgsrc = kalgorithm.imgRead(inputfile)
kalgorithm.pltHistAndImage(imgsrc.astype(np.uint8), "phase2_broken.Original")
img = kalgorithm.histEqualization(imgsrc.copy())
outfile = r"./output_images/phase2/phase2_broken_hist_equalization.jpg"
kalgorithm.imgSave(outfile, img)
kalgorithm.pltHistAndImage(img, "phase2_broken.histEqualization")
img = kalgorithm.histManipulation(imgsrc.copy())
outfile = r"./output_images/phase2/phase2_broken_hist_manipulation.jpg"
kalgorithm.imgSave(outfile, img)
kalgorithm.pltHistAndImage(img, "phase2_broken.histManipulation")
if __name__ == "__main__":
main()
| #encoding=utf8
import os, re, sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
from lib import kalgorithm
def main():
inputfile = r"./input_images/phase2/phase2_broken.jpg"
imgsrc = kalgorithm.imgRead(inputfile)
kalgorithm.pltHistAndImage(imgsrc.astype(np.uint8), "phase2_broken.Original")
img = kalgorithm.histEqualization(imgsrc.copy())
outfile = r"./output_images/phase2/phase2_broken_hist_equalization.jpg"
kalgorithm.imgSave(outfile, img)
kalgorithm.pltHistAndImage(img, "phase2_broken.histEqualization")
img = kalgorithm.histManipulation(imgsrc.copy())
outfile = r"./output_images/phase2/phase2_broken_hist_manipulation.jpg"
kalgorithm.imgSave(outfile, img)
kalgorithm.pltHistAndImage(img, "phase2_broken.histManipulation")
if __name__ == "__main__":
main() | id | 0.244375 | #encoding=utf8 | 2.713872 | 3 |
mmedit/models/restorers/ttsr.py | sokazaki/mmediting | 2 | 6630066 | import numbers
import os.path as osp
import mmcv
import torch
from mmedit.core import tensor2img
from ..builder import build_backbone, build_component, build_loss
from ..registry import MODELS
from .basic_restorer import BasicRestorer
@MODELS.register_module()
class TTSR(BasicRestorer):
"""TTSR model for Reference-based Image Super-Resolution.
Paper: Learning Texture Transformer Network for Image Super-Resolution.
Args:
generator (dict): Config for the generator.
extractor (dict): Config for the extractor.
transformer (dict): Config for the transformer.
pixel_loss (dict): Config for the pixel loss.
train_cfg (dict): Config for train. Default: None.
test_cfg (dict): Config for testing. Default: None.
pretrained (str): Path for pretrained model. Default: None.
"""
def __init__(self,
generator,
extractor,
transformer,
pixel_loss,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(BasicRestorer, self).__init__()
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# model
self.generator = build_backbone(generator)
self.transformer = build_component(transformer)
self.extractor = build_component(extractor)
# loss
self.pixel_loss = build_loss(pixel_loss)
# pretrained
self.init_weights(pretrained)
def forward_dummy(self, lq, lq_up, ref, ref_downup, only_pred=True):
"""Forward of networks.
Args:
lq (Tensor): LQ image.
lq_up (Tensor): Upsampled LQ image.
ref (Tensor): Reference image.
ref_downup (Tensor): Image generated by sequentially applying
bicubic down-sampling and up-sampling on reference image.
only_pred (bool): Only return predicted results or not.
Default: True.
Returns:
pred (Tensor): Predicted super-resolution results (n, 3, 4h, 4w).
s (Tensor): Soft-Attention tensor with shape (n, 1, h, w).
t_level3 (Tensor): Transformed HR texture T in level3.
(n, 4c, h, w)
t_level2 (Tensor): Transformed HR texture T in level2.
(n, 2c, 2h, 2w)
t_level1 (Tensor): Transformed HR texture T in level1.
(n, c, 4h, 4w)
"""
_, _, lq_up_level3 = self.extractor(lq_up)
_, _, ref_downup_level3 = self.extractor(ref_downup)
ref_level1, ref_level2, ref_level3 = self.extractor(ref)
s, t_level3, t_level2, t_level1 = self.transformer(
lq_up_level3, ref_downup_level3, ref_level1, ref_level2,
ref_level3)
pred = self.generator(lq, s, t_level3, t_level2, t_level1)
if only_pred:
return pred
return pred, s, t_level3, t_level2, t_level1
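# As documented above, the restorer upsamples by 4x: an lq batch of shape
# (n, 3, h, w) produces a pred of shape (n, 3, 4h, 4w); s and the t_level*
# textures follow the shapes listed in the docstring.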
def forward(self, lq, gt=None, test_mode=False, **kwargs):
"""Forward function.
Args:
lq (Tensor): Input lq images.
gt (Tensor): Ground-truth image. Default: None.
test_mode (bool): Whether in test mode or not. Default: False.
kwargs (dict): Other arguments.
"""
if test_mode:
return self.forward_test(lq, gt=gt, **kwargs)
return self.forward_dummy(lq, **kwargs)
def train_step(self, data_batch, optimizer):
"""Train step.
Args:
data_batch (dict): A batch of data, which requires
'lq', 'gt', 'lq_up', 'ref', 'ref_downup'
optimizer (obj): Optimizer.
Returns:
dict: Returned output, which includes:
log_vars, num_samples, results (lq, gt and pred).
"""
# data
lq = data_batch['lq']
lq_up = data_batch['lq_up']
gt = data_batch['gt']
ref = data_batch['ref']
ref_downup = data_batch['ref_downup']
# generate
pred = self.forward_dummy(lq, lq_up, ref, ref_downup)
# loss
losses = dict()
losses['loss_pix'] = self.pixel_loss(pred, gt)
# parse loss
loss, log_vars = self.parse_losses(losses)
# optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
log_vars.pop('loss') # remove the unnecessary 'loss'
outputs = dict(
log_vars=log_vars,
num_samples=len(gt.data),
results=dict(
lq=lq.cpu(), gt=gt.cpu(), ref=ref.cpu(), output=pred.cpu()))
return outputs
def forward_test(self,
lq,
lq_up,
ref,
ref_downup,
gt=None,
meta=None,
save_image=False,
save_path=None,
iteration=None):
"""Testing forward function.
Args:
lq (Tensor): LQ image
gt (Tensor): GT image
lq_up (Tensor): Upsampled LQ image
ref (Tensor): Reference image
ref_downup (Tensor): Image generated by sequentially applying
bicubic down-sampling and up-sampling on reference image
meta (list[dict]): Meta data, such as path of GT file.
Default: None.
save_image (bool): Whether to save image. Default: False.
save_path (str): Path to save image. Default: None.
iteration (int): Iteration for the saving image name.
Default: None.
Returns:
dict: Output results, which contain either key(s)
1. 'eval_result'.
2. 'lq', 'pred'.
3. 'lq', 'pred', 'gt'.
"""
# generator
with torch.no_grad():
pred = self.forward_dummy(
lq=lq, lq_up=lq_up, ref=ref, ref_downup=ref_downup)
pred = (pred + 1.) / 2.
if gt is not None:
gt = (gt + 1.) / 2.
if self.test_cfg is not None and self.test_cfg.get('metrics', None):
assert gt is not None, (
'evaluation with metrics must have gt images.')
results = dict(eval_result=self.evaluate(pred, gt))
else:
results = dict(lq=lq.cpu(), output=pred.cpu())
if gt is not None:
results['gt'] = gt.cpu()
# save image
if save_image:
if 'gt_path' in meta[0]:
the_path = meta[0]['gt_path']
else:
the_path = meta[0]['lq_path']
folder_name = osp.splitext(osp.basename(the_path))[0]
if isinstance(iteration, numbers.Number):
save_path = osp.join(save_path, folder_name,
f'{folder_name}-{iteration + 1:06d}.png')
elif iteration is None:
save_path = osp.join(save_path, f'{folder_name}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(tensor2img(pred), save_path)
return results
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
if self.generator:
self.generator.init_weights(pretrained, strict)
if self.extractor:
self.extractor.init_weights(pretrained, strict)
if self.transformer:
self.transformer.init_weights(pretrained, strict)
elif pretrained is not None:
raise TypeError('"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
| import numbers
import os.path as osp
import mmcv
import torch
from mmedit.core import tensor2img
from ..builder import build_backbone, build_component, build_loss
from ..registry import MODELS
from .basic_restorer import BasicRestorer
@MODELS.register_module()
class TTSR(BasicRestorer):
"""TTSR model for Reference-based Image Super-Resolution.
Paper: Learning Texture Transformer Network for Image Super-Resolution.
Args:
generator (dict): Config for the generator.
extractor (dict): Config for the extractor.
transformer (dict): Config for the transformer.
pixel_loss (dict): Config for the pixel loss.
train_cfg (dict): Config for train. Default: None.
test_cfg (dict): Config for testing. Default: None.
pretrained (str): Path for pretrained model. Default: None.
"""
def __init__(self,
generator,
extractor,
transformer,
pixel_loss,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(BasicRestorer, self).__init__()
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# model
self.generator = build_backbone(generator)
self.transformer = build_component(transformer)
self.extractor = build_component(extractor)
# loss
self.pixel_loss = build_loss(pixel_loss)
# pretrained
self.init_weights(pretrained)
def forward_dummy(self, lq, lq_up, ref, ref_downup, only_pred=True):
"""Forward of networks.
Args:
lq (Tensor): LQ image.
lq_up (Tensor): Upsampled LQ image.
ref (Tensor): Reference image.
ref_downup (Tensor): Image generated by sequentially applying
bicubic down-sampling and up-sampling on reference image.
only_pred (bool): Only return predicted results or not.
Default: True.
Returns:
pred (Tensor): Predicted super-resolution results (n, 3, 4h, 4w).
s (Tensor): Soft-Attention tensor with shape (n, 1, h, w).
t_level3 (Tensor): Transformed HR texture T in level3.
(n, 4c, h, w)
t_level2 (Tensor): Transformed HR texture T in level2.
(n, 2c, 2h, 2w)
t_level1 (Tensor): Transformed HR texture T in level1.
(n, c, 4h, 4w)
"""
_, _, lq_up_level3 = self.extractor(lq_up)
_, _, ref_downup_level3 = self.extractor(ref_downup)
ref_level1, ref_level2, ref_level3 = self.extractor(ref)
s, t_level3, t_level2, t_level1 = self.transformer(
lq_up_level3, ref_downup_level3, ref_level1, ref_level2,
ref_level3)
pred = self.generator(lq, s, t_level3, t_level2, t_level1)
if only_pred:
return pred
return pred, s, t_level3, t_level2, t_level1
def forward(self, lq, gt=None, test_mode=False, **kwargs):
"""Forward function.
Args:
lq (Tensor): Input lq images.
gt (Tensor): Ground-truth image. Default: None.
test_mode (bool): Whether in test mode or not. Default: False.
kwargs (dict): Other arguments.
"""
if test_mode:
return self.forward_test(lq, gt=gt, **kwargs)
return self.forward_dummy(lq, **kwargs)
def train_step(self, data_batch, optimizer):
"""Train step.
Args:
data_batch (dict): A batch of data, which requires
'lq', 'gt', 'lq_up', 'ref', 'ref_downup'
optimizer (obj): Optimizer.
Returns:
dict: Returned output, which includes:
log_vars, num_samples, results (lq, gt and pred).
"""
# data
lq = data_batch['lq']
lq_up = data_batch['lq_up']
gt = data_batch['gt']
ref = data_batch['ref']
ref_downup = data_batch['ref_downup']
# generate
pred = self.forward_dummy(lq, lq_up, ref, ref_downup)
# loss
losses = dict()
losses['loss_pix'] = self.pixel_loss(pred, gt)
# parse loss
loss, log_vars = self.parse_losses(losses)
# optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
log_vars.pop('loss') # remove the unnecessary 'loss'
outputs = dict(
log_vars=log_vars,
num_samples=len(gt.data),
results=dict(
lq=lq.cpu(), gt=gt.cpu(), ref=ref.cpu(), output=pred.cpu()))
return outputs
def forward_test(self,
lq,
lq_up,
ref,
ref_downup,
gt=None,
meta=None,
save_image=False,
save_path=None,
iteration=None):
"""Testing forward function.
Args:
lq (Tensor): LQ image
gt (Tensor): GT image
lq_up (Tensor): Upsampled LQ image
ref (Tensor): Reference image
ref_downup (Tensor): Image generated by sequentially applying
bicubic down-sampling and up-sampling on reference image
meta (list[dict]): Meta data, such as path of GT file.
Default: None.
save_image (bool): Whether to save image. Default: False.
save_path (str): Path to save image. Default: None.
iteration (int): Iteration for the saving image name.
Default: None.
Returns:
dict: Output results, which contain either key(s)
1. 'eval_result'.
2. 'lq', 'pred'.
3. 'lq', 'pred', 'gt'.
"""
# generator
with torch.no_grad():
pred = self.forward_dummy(
lq=lq, lq_up=lq_up, ref=ref, ref_downup=ref_downup)
pred = (pred + 1.) / 2.
if gt is not None:
gt = (gt + 1.) / 2.
if self.test_cfg is not None and self.test_cfg.get('metrics', None):
assert gt is not None, (
'evaluation with metrics must have gt images.')
results = dict(eval_result=self.evaluate(pred, gt))
else:
results = dict(lq=lq.cpu(), output=pred.cpu())
if gt is not None:
results['gt'] = gt.cpu()
# save image
if save_image:
if 'gt_path' in meta[0]:
the_path = meta[0]['gt_path']
else:
the_path = meta[0]['lq_path']
folder_name = osp.splitext(osp.basename(the_path))[0]
if isinstance(iteration, numbers.Number):
save_path = osp.join(save_path, folder_name,
f'{folder_name}-{iteration + 1:06d}.png')
elif iteration is None:
save_path = osp.join(save_path, f'{folder_name}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(tensor2img(pred), save_path)
return results
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
if self.generator:
self.generator.init_weights(pretrained, strict)
if self.extractor:
self.extractor.init_weights(pretrained, strict)
if self.transformer:
self.transformer.init_weights(pretrained, strict)
elif pretrained is not None:
raise TypeError('"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
| en | 0.609925 | TTSR model for Reference-based Image Super-Resolution. Paper: Learning Texture Transformer Network for Image Super-Resolution. Args: generator (dict): Config for the generator. extractor (dict): Config for the extractor. transformer (dict): Config for the transformer. pixel_loss (dict): Config for the pixel loss. train_cfg (dict): Config for train. Default: None. test_cfg (dict): Config for testing. Default: None. pretrained (str): Path for pretrained model. Default: None. # model # loss # pretrained Forward of networks. Args: lq (Tensor): LQ image. lq_up (Tensor): Upsampled LQ image. ref (Tensor): Reference image. ref_downup (Tensor): Image generated by sequentially applying bicubic down-sampling and up-sampling on reference image. only_pred (bool): Only return predicted results or not. Default: True. Returns: pred (Tensor): Predicted super-resolution results (n, 3, 4h, 4w). s (Tensor): Soft-Attention tensor with shape (n, 1, h, w). t_level3 (Tensor): Transformed HR texture T in level3. (n, 4c, h, w) t_level2 (Tensor): Transformed HR texture T in level2. (n, 2c, 2h, 2w) t_level1 (Tensor): Transformed HR texture T in level1. (n, c, 4h, 4w) Forward function. Args: lq (Tensor): Input lq images. gt (Tensor): Ground-truth image. Default: None. test_mode (bool): Whether in test mode or not. Default: False. kwargs (dict): Other arguments. Train step. Args: data_batch (dict): A batch of data, which requires 'lq', 'gt', 'lq_up', 'ref', 'ref_downup' optimizer (obj): Optimizer. Returns: dict: Returned output, which includes: log_vars, num_samples, results (lq, gt and pred). # data # generate # loss # parse loss # optimize # remove the unnecessary 'loss' Testing forward function. Args: lq (Tensor): LQ image gt (Tensor): GT image lq_up (Tensor): Upsampled LQ image ref (Tensor): Reference image ref_downup (Tensor): Image generated by sequentially applying bicubic down-sampling and up-sampling on reference image meta (list[dict]): Meta data, such as path of GT file. Default: None. save_image (bool): Whether to save image. Default: False. save_path (str): Path to save image. Default: None. iteration (int): Iteration for the saving image name. Default: None. Returns: dict: Output results, which contain either key(s) 1. 'eval_result'. 2. 'lq', 'pred'. 3. 'lq', 'pred', 'gt'. # generator # save image Init weights for models. Args: pretrained (str, optional): Path for pretrained weights. If given None, pretrained weights will not be loaded. Defaults to None. strict (boo, optional): Whether strictly load the pretrained model. Defaults to True. | 2.138178 | 2 |
test_IPsniffer.py | eLuxUniBS/IPsniffer | 0 | 6630067 | <gh_stars>0
from ipsniffer import IPsniffer
import time
import datetime as dt
iface = "enp0s31f6"
def test_sniffer():
snffer = IPsniffer("test", iface=iface, filter="", count=3)
snffer.start()
snffer.p.join()
assert snffer.buffer.qsize() == 3
def test_restart():
snffer = IPsniffer("test", iface=iface, filter="", count=3)
snffer.start()
snffer.kill()
time.sleep(0.5)
while not snffer.buffer.empty():
snffer.buffer.get_nowait()
snffer.start()
snffer.p.join()
assert snffer.buffer.qsize() == 3
def test_write_pcap():
snffer = IPsniffer("test", iface=iface, filter="")
min = 0.1
snffer.start()
tstart = dt.datetime.now()
snffer.save_pcap_in_interval("test.pcap", min)
tend = dt.datetime.now()
snffer.kill()
assert ((tend - tstart) - dt.timedelta(minutes=min)).total_seconds() < 1
def test_offline():
sniffer = IPsniffer('test_offline', iface=None, filter=None, offline='/home/paolo/CSCS-TEMP/ns.cap-modbus.pcap')
sniffer.start()
time.sleep(1)
sniffer.kill()
assert sniffer.buffer.qsize() != 0
def test_getpkt():
count = 5
sniffer = IPsniffer('test_offline', iface=None, filter='tcp and dst port 502', count=count,offline='/home/paolo/CSCS-TEMP/ns.cap-17-00.pcap',)
sniffer.start()
# sniffer.kill()
sniffer.p.join()
ml = list()
while sniffer.buffer.qsize() != 0:
pkt = sniffer.buffer.get()
print(pkt.show())
ml.append(pkt)
assert len(ml) == count
| from ipsniffer import IPsniffer
import time
import datetime as dt
iface = "enp0s31f6"
def test_sniffer():
snffer = IPsniffer("test", iface=iface, filter="", count=3)
snffer.start()
snffer.p.join()
assert snffer.buffer.qsize() == 3
def test_restart():
snffer = IPsniffer("test", iface=iface, filter="", count=3)
snffer.start()
snffer.kill()
time.sleep(0.5)
while not snffer.buffer.empty():
snffer.buffer.get_nowait()
snffer.start()
snffer.p.join()
assert snffer.buffer.qsize() == 3
def test_write_pcap():
snffer = IPsniffer("test", iface=iface, filter="")
min = 0.1
snffer.start()
tstart = dt.datetime.now()
snffer.save_pcap_in_interval("test.pcap", min)
tend = dt.datetime.now()
snffer.kill()
assert ((tend - tstart) - dt.timedelta(minutes=min)).total_seconds() < 1
def test_offline():
sniffer = IPsniffer('test_offline', iface=None, filter=None, offline='/home/paolo/CSCS-TEMP/ns.cap-modbus.pcap')
sniffer.start()
time.sleep(1)
sniffer.kill()
assert sniffer.buffer.qsize() != 0
def test_getpkt():
count = 5
sniffer = IPsniffer('test_offline', iface=None, filter='tcp and dst port 502', count=count,offline='/home/paolo/CSCS-TEMP/ns.cap-17-00.pcap',)
sniffer.start()
# sniffer.kill()
sniffer.p.join()
ml = list()
while sniffer.buffer.qsize() != 0:
pkt = sniffer.buffer.get()
print(pkt.show())
ml.append(pkt)
assert len(ml) == count | en | 0.457361 | # sniffer.kill() | 2.353768 | 2 |
mars/scheduler/operands/tests/test_common_ut.py | HarshCasper/mars | 2 | 6630068 | <gh_stars>1-10
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import unittest
import uuid
from collections import defaultdict
from mars import promise, tensor as mt
from mars.graph import DAG
from mars.scheduler import OperandState, ResourceActor, ChunkMetaActor,\
ChunkMetaClient, AssignerActor, GraphActor, OperandActor
from mars.scheduler.utils import SchedulerClusterInfoActor
from mars.tests.core import patch_method, create_actor_pool
from mars.utils import get_next_port, serialize_graph
class FakeExecutionActor(promise.PromiseActor):
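# Test double for the worker-side execution actor: it records the callbacks
# handed to execute_graph()/add_finish_callback() and fires them after the
# configured exec_delay instead of executing a real chunk graph.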
def __init__(self, exec_delay):
super().__init__()
self._exec_delay = exec_delay
self._finished_keys = set()
self._enqueue_callbacks = dict()
self._finish_callbacks = defaultdict(list)
self._undone_preds = dict()
self._succs = dict()
def mock_send_all_callbacks(self, graph_key):
for cb in self._finish_callbacks[graph_key]:
self.tell_promise(cb, {})
self._finished_keys.add(graph_key)
self._finish_callbacks[graph_key] = []
try:
for succ_key in self._succs[graph_key]:
self._undone_preds[succ_key].difference_update([graph_key])
if not self._undone_preds[succ_key]:
self.tell_promise(self._enqueue_callbacks[succ_key])
except KeyError:
pass
def execute_graph(self, session_id, graph_key, graph_ser, io_meta, data_metas,
send_addresses=None, callback=None):
if callback:
self._finish_callbacks[graph_key].append(callback)
self.ref().mock_send_all_callbacks(graph_key, _tell=True, _delay=self._exec_delay)
def add_finish_callback(self, session_id, graph_key, callback):
if graph_key in self._finished_keys:
self.tell_promise(callback)
else:
self._finish_callbacks[graph_key].append(callback)
@patch_method(ResourceActor._broadcast_sessions)
@patch_method(ResourceActor._broadcast_workers)
class Test(unittest.TestCase):
@contextlib.contextmanager
def _prepare_test_graph(self, session_id, graph_key, mock_workers):
addr = f'127.0.0.1:{get_next_port()}'
a1 = mt.random.random((100,))
a2 = mt.random.random((100,))
s = a1 + a2
v1, v2 = mt.split(s, 2)
graph = DAG()
v1.build_graph(graph=graph, compose=False)
v2.build_graph(graph=graph, compose=False)
with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
uid=SchedulerClusterInfoActor.default_uid())
resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
pool.create_actor(AssignerActor, uid=AssignerActor.gen_uid(session_id))
graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialize_graph(graph),
uid=GraphActor.gen_uid(session_id, graph_key))
for w in mock_workers:
resource_ref.set_worker_meta(w, dict(hardware=dict(cpu=4, cpu_total=4, memory=1600)))
graph_ref.prepare_graph()
graph_ref.analyze_graph()
graph_ref.create_operand_actors(_start=False)
yield pool, graph_ref
@staticmethod
def _filter_graph_level_op_keys(graph_ref):
from mars.tensor.random import TensorRandomSample
from mars.tensor.indexing.getitem import TensorIndex
from mars.tensor.arithmetic import TensorAdd
graph = graph_ref.get_chunk_graph()
return (
[c.op.key for c in graph if isinstance(c.op, TensorRandomSample)],
[c.op.key for c in graph if isinstance(c.op, TensorAdd)][0],
[c.op.key for c in graph if isinstance(c.op, TensorIndex)],
)
@staticmethod
def _filter_graph_level_chunk_keys(graph_ref):
from mars.tensor.random import TensorRandomSample
from mars.tensor.indexing.getitem import TensorIndex
from mars.tensor.arithmetic import TensorAdd
graph = graph_ref.get_chunk_graph()
return (
[c.key for c in graph if isinstance(c.op, TensorRandomSample)],
[c.key for c in graph if isinstance(c.op, TensorAdd)][0],
[c.key for c in graph if isinstance(c.op, TensorIndex)],
)
@patch_method(ResourceActor.allocate_resource, new=lambda *_, **__: True)
@patch_method(ResourceActor.detach_dead_workers)
@patch_method(ResourceActor.detect_dead_workers)
def testReadyState(self, *_):
session_id = str(uuid.uuid4())
graph_key = str(uuid.uuid4())
mock_workers = ['localhost:12345', 'localhost:23456']
def _mock_get_workers_meta(*_, **__):
return dict((w, dict(hardware=dict(cpu_total=1, memory=1024 ** 3))) for w in mock_workers)
with patch_method(ResourceActor.get_workers_meta, new=_mock_get_workers_meta) as _, \
self._prepare_test_graph(session_id, graph_key, mock_workers) as (pool, graph_ref):
input_op_keys, mid_op_key, output_op_keys = self._filter_graph_level_op_keys(graph_ref)
meta_client = ChunkMetaClient(pool, pool.actor_ref(SchedulerClusterInfoActor.default_uid()))
op_ref = pool.actor_ref(OperandActor.gen_uid(session_id, mid_op_key))
resource_ref = pool.actor_ref(ResourceActor.default_uid())
input_refs = [pool.actor_ref(OperandActor.gen_uid(session_id, k)) for k in input_op_keys]
def test_entering_state(target):
for key in input_op_keys:
op_ref.remove_finished_predecessor(key)
op_ref.start_operand(OperandState.UNSCHEDULED)
for ref in input_refs:
ref.start_operand(OperandState.UNSCHEDULED)
for ref in input_refs:
self.assertEqual(op_ref.get_state(), OperandState.UNSCHEDULED)
ref.start_operand(OperandState.FINISHED)
pool.sleep(1)
self.assertEqual(target, op_ref.get_state())
for w in mock_workers:
resource_ref.deallocate_resource(session_id, mid_op_key, w)
# test entering state with no input meta
test_entering_state(OperandState.UNSCHEDULED)
# fill meta
input_chunk_keys, _, _ = self._filter_graph_level_chunk_keys(graph_ref)
for ck in input_chunk_keys:
meta_client.set_chunk_meta(session_id, ck, workers=('localhost:12345',), size=800)
# test successful entering state
test_entering_state(OperandState.READY)
| # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import unittest
import uuid
from collections import defaultdict
from mars import promise, tensor as mt
from mars.graph import DAG
from mars.scheduler import OperandState, ResourceActor, ChunkMetaActor,\
ChunkMetaClient, AssignerActor, GraphActor, OperandActor
from mars.scheduler.utils import SchedulerClusterInfoActor
from mars.tests.core import patch_method, create_actor_pool
from mars.utils import get_next_port, serialize_graph
class FakeExecutionActor(promise.PromiseActor):
def __init__(self, exec_delay):
super().__init__()
self._exec_delay = exec_delay
self._finished_keys = set()
self._enqueue_callbacks = dict()
self._finish_callbacks = defaultdict(list)
self._undone_preds = dict()
self._succs = dict()
def mock_send_all_callbacks(self, graph_key):
for cb in self._finish_callbacks[graph_key]:
self.tell_promise(cb, {})
self._finished_keys.add(graph_key)
self._finish_callbacks[graph_key] = []
try:
for succ_key in self._succs[graph_key]:
self._undone_preds[succ_key].difference_update([graph_key])
if not self._undone_preds[succ_key]:
self.tell_promise(self._enqueue_callbacks[succ_key])
except KeyError:
pass
def execute_graph(self, session_id, graph_key, graph_ser, io_meta, data_metas,
send_addresses=None, callback=None):
if callback:
self._finish_callbacks[graph_key].append(callback)
self.ref().mock_send_all_callbacks(graph_key, _tell=True, _delay=self._exec_delay)
def add_finish_callback(self, session_id, graph_key, callback):
if graph_key in self._finished_keys:
self.tell_promise(callback)
else:
self._finish_callbacks[graph_key].append(callback)
@patch_method(ResourceActor._broadcast_sessions)
@patch_method(ResourceActor._broadcast_workers)
class Test(unittest.TestCase):
@contextlib.contextmanager
def _prepare_test_graph(self, session_id, graph_key, mock_workers):
addr = f'127.0.0.1:{get_next_port()}'
a1 = mt.random.random((100,))
a2 = mt.random.random((100,))
s = a1 + a2
v1, v2 = mt.split(s, 2)
graph = DAG()
v1.build_graph(graph=graph, compose=False)
v2.build_graph(graph=graph, compose=False)
with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
uid=SchedulerClusterInfoActor.default_uid())
resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
pool.create_actor(AssignerActor, uid=AssignerActor.gen_uid(session_id))
graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialize_graph(graph),
uid=GraphActor.gen_uid(session_id, graph_key))
for w in mock_workers:
resource_ref.set_worker_meta(w, dict(hardware=dict(cpu=4, cpu_total=4, memory=1600)))
graph_ref.prepare_graph()
graph_ref.analyze_graph()
graph_ref.create_operand_actors(_start=False)
yield pool, graph_ref
@staticmethod
def _filter_graph_level_op_keys(graph_ref):
from mars.tensor.random import TensorRandomSample
from mars.tensor.indexing.getitem import TensorIndex
from mars.tensor.arithmetic import TensorAdd
graph = graph_ref.get_chunk_graph()
return (
[c.op.key for c in graph if isinstance(c.op, TensorRandomSample)],
[c.op.key for c in graph if isinstance(c.op, TensorAdd)][0],
[c.op.key for c in graph if isinstance(c.op, TensorIndex)],
)
@staticmethod
def _filter_graph_level_chunk_keys(graph_ref):
from mars.tensor.random import TensorRandomSample
from mars.tensor.indexing.getitem import TensorIndex
from mars.tensor.arithmetic import TensorAdd
graph = graph_ref.get_chunk_graph()
return (
[c.key for c in graph if isinstance(c.op, TensorRandomSample)],
[c.key for c in graph if isinstance(c.op, TensorAdd)][0],
[c.key for c in graph if isinstance(c.op, TensorIndex)],
)
@patch_method(ResourceActor.allocate_resource, new=lambda *_, **__: True)
@patch_method(ResourceActor.detach_dead_workers)
@patch_method(ResourceActor.detect_dead_workers)
def testReadyState(self, *_):
session_id = str(uuid.uuid4())
graph_key = str(uuid.uuid4())
mock_workers = ['localhost:12345', 'localhost:23456']
def _mock_get_workers_meta(*_, **__):
return dict((w, dict(hardware=dict(cpu_total=1, memory=1024 ** 3))) for w in mock_workers)
with patch_method(ResourceActor.get_workers_meta, new=_mock_get_workers_meta) as _, \
self._prepare_test_graph(session_id, graph_key, mock_workers) as (pool, graph_ref):
input_op_keys, mid_op_key, output_op_keys = self._filter_graph_level_op_keys(graph_ref)
meta_client = ChunkMetaClient(pool, pool.actor_ref(SchedulerClusterInfoActor.default_uid()))
op_ref = pool.actor_ref(OperandActor.gen_uid(session_id, mid_op_key))
resource_ref = pool.actor_ref(ResourceActor.default_uid())
input_refs = [pool.actor_ref(OperandActor.gen_uid(session_id, k)) for k in input_op_keys]
def test_entering_state(target):
for key in input_op_keys:
op_ref.remove_finished_predecessor(key)
op_ref.start_operand(OperandState.UNSCHEDULED)
for ref in input_refs:
ref.start_operand(OperandState.UNSCHEDULED)
for ref in input_refs:
self.assertEqual(op_ref.get_state(), OperandState.UNSCHEDULED)
ref.start_operand(OperandState.FINISHED)
pool.sleep(1)
self.assertEqual(target, op_ref.get_state())
for w in mock_workers:
resource_ref.deallocate_resource(session_id, mid_op_key, w)
# test entering state with no input meta
test_entering_state(OperandState.UNSCHEDULED)
# fill meta
input_chunk_keys, _, _ = self._filter_graph_level_chunk_keys(graph_ref)
for ck in input_chunk_keys:
meta_client.set_chunk_meta(session_id, ck, workers=('localhost:12345',), size=800)
# test successful entering state
test_entering_state(OperandState.READY) | en | 0.816707 | # Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # test entering state with no input meta # fill meta # test successful entering state | 1.617286 | 2 |
pygrading/static/kernel/__main__.py | PhenomingZ/PyGrading | 1 | 6630069 | import pygrading as gg
from prework import prework
from run import run
from postwork import postwork
if __name__ == '__main__':
    # Create the grading job object
job = gg.Job(prework=prework, run=run, postwork=postwork)
    # Start the job and set the maximum number of worker threads
job.start(max_workers=1)
    # Print the result as a JSON string
job.print()
| import pygrading as gg
from prework import prework
from run import run
from postwork import postwork
if __name__ == '__main__':
# 创建任务对象
job = gg.Job(prework=prework, run=run, postwork=postwork)
# 启动任务并配置最大线程数
job.start(max_workers=1)
# 输出结果JSON串
job.print()
| zh | 0.905949 | # 创建任务对象 # 启动任务并配置最大线程数 # 输出结果JSON串 | 2.154988 | 2 |
testing/input/customization/linktypes.py | fekblom/critic | 216 | 6630070 | import linkify
class IssueLink(linkify.LinkType):
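    # Turns "#<digits>" issue references in review text into tracker URLs.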
def __init__(self):
super(IssueLink, self).__init__("#[0-9]+")
def linkify(self, word, context):
return "https://issuetracker.example.com/showIssue?id=" + word[1:]
IssueLink()
| import linkify
class IssueLink(linkify.LinkType):
def __init__(self):
super(IssueLink, self).__init__("#[0-9]+")
def linkify(self, word, context):
return "https://issuetracker.example.com/showIssue?id=" + word[1:]
IssueLink()
| none | 1 | 2.545779 | 3 |
|
connector/revconn.py | dantmnf/ArknightsAutoHelper | 2 | 6630071 | import socket
import selectors
import threading
import secrets
import queue
class ReverseConnectionHost(threading.Thread):
def __init__(self, port=0):
super().__init__()
self.daemon = True
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_sock.bind(('127.0.0.1', port))
self.port = self.listen_sock.getsockname()[1]
self.registered = {}
self.fulfilled = {}
self.registered_lock = threading.RLock()
self.fulfilled_lock = threading.RLock()
def __del__(self):
for cookie, evt in self.registered.items():
evt.set()
def register_cookie(self, cookie=None):
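        # Reserve an 8-byte cookie; a client that later connects and sends the
        # same cookie is matched to the caller waiting on it.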
with self.registered_lock:
if cookie is None:
while True:
cookie = b'%08X' % secrets.randbits(32)
if cookie not in self.registered:
break
else:
assert len(cookie) == 8
if cookie not in self.registered:
self.registered[cookie] = threading.Event()
return cookie
def wait_registered_socket(self, cookie, timeout=15):
assert len(cookie) == 8
with self.fulfilled_lock:
if cookie in self.fulfilled:
sock = self.fulfilled[cookie]
del self.fulfilled[cookie]
return sock
e = self.registered[cookie]
if e.wait(timeout):
return self.wait_registered_socket(cookie)
return None
def _fulfilled(self, cookie, sock):
with self.fulfilled_lock:
self.fulfilled[cookie] = sock
self.registered[cookie].set()
with self.registered_lock:
del self.registered[cookie]
def run(self):
self.listen_sock.listen()
self.sel = selectors.DefaultSelector()
self.sel.register(self.listen_sock, selectors.EVENT_READ, (self._accept_conn, None))
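        # Event loop: dispatch the (callback, data) pair stored on each
        # selector key; ends once every fd, including the listener, is gone.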
while True:
# print('selecting ', list(self.sel.get_map().keys()))
if len(self.sel.get_map()) == 0:
break
events = self.sel.select(1)
for key, event in events:
callback, cookie = key.data
callback(key.fileobj, event, cookie)
def stop(self):
self.sel.unregister(self.listen_sock)
def _accept_conn(self, sock, event, _):
conn, peer = sock.accept()
self.sel.register(conn, selectors.EVENT_READ, (self._conn_data, [b'']))
def _conn_data(self, sock, event, box):
data = sock.recv(8 - len(box[0]))
if data:
box[0] += data
if len(box[0]) == 8:
self.sel.unregister(sock)
cookie = box[0]
if cookie in self.registered:
self._fulfilled(cookie, sock)
else:
sock.close()
else:
self.sel.unregister(sock)
sock.close()
def main():
worker = ReverseConnectionHost(11451)
worker.start()
try:
while True:
worker.register_cookie(b'0000000\n')
sock = worker.wait_registered_socket(b'0000000\n')
while True:
buf = sock.recv(4096)
if not buf:
break
sock.send(buf)
sock.close()
finally:
worker.stop()
if __name__ == "__main__":
main() | import socket
import selectors
import threading
import secrets
import queue
class ReverseConnectionHost(threading.Thread):
def __init__(self, port=0):
super().__init__()
self.daemon = True
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_sock.bind(('127.0.0.1', port))
self.port = self.listen_sock.getsockname()[1]
self.registered = {}
self.fulfilled = {}
self.registered_lock = threading.RLock()
self.fulfilled_lock = threading.RLock()
def __del__(self):
for cookie, evt in self.registered.items():
evt.set()
def register_cookie(self, cookie=None):
with self.registered_lock:
if cookie is None:
while True:
cookie = b'%08X' % secrets.randbits(32)
if cookie not in self.registered:
break
else:
assert len(cookie) == 8
if cookie not in self.registered:
self.registered[cookie] = threading.Event()
return cookie
def wait_registered_socket(self, cookie, timeout=15):
assert len(cookie) == 8
with self.fulfilled_lock:
if cookie in self.fulfilled:
sock = self.fulfilled[cookie]
del self.fulfilled[cookie]
return sock
e = self.registered[cookie]
if e.wait(timeout):
return self.wait_registered_socket(cookie)
return None
def _fulfilled(self, cookie, sock):
with self.fulfilled_lock:
self.fulfilled[cookie] = sock
self.registered[cookie].set()
with self.registered_lock:
del self.registered[cookie]
def run(self):
self.listen_sock.listen()
self.sel = selectors.DefaultSelector()
self.sel.register(self.listen_sock, selectors.EVENT_READ, (self._accept_conn, None))
while True:
# print('selecting ', list(self.sel.get_map().keys()))
if len(self.sel.get_map()) == 0:
break
events = self.sel.select(1)
for key, event in events:
callback, cookie = key.data
callback(key.fileobj, event, cookie)
def stop(self):
self.sel.unregister(self.listen_sock)
def _accept_conn(self, sock, event, _):
conn, peer = sock.accept()
self.sel.register(conn, selectors.EVENT_READ, (self._conn_data, [b'']))
def _conn_data(self, sock, event, box):
data = sock.recv(8 - len(box[0]))
if data:
box[0] += data
if len(box[0]) == 8:
self.sel.unregister(sock)
cookie = box[0]
if cookie in self.registered:
self._fulfilled(cookie, sock)
else:
sock.close()
else:
self.sel.unregister(sock)
sock.close()
def main():
worker = ReverseConnectionHost(11451)
worker.start()
try:
while True:
worker.register_cookie(b'0000000\n')
sock = worker.wait_registered_socket(b'0000000\n')
while True:
buf = sock.recv(4096)
if not buf:
break
sock.send(buf)
sock.close()
finally:
worker.stop()
if __name__ == "__main__":
main() | en | 0.12421 | # print('selecting ', list(self.sel.get_map().keys())) | 2.643142 | 3 |
tab_fit.py | chuckedfromspace/carspy-dash | 0 | 6630072 | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
from app import app
from utils import (DEFAULT_SETTINGS_SLIT, DEFAULT_SETTINGS_FIT,
downsample_synth, plot_fitting,
plot_placeholder, plot_slit, least_sqrt_fit, unpack_lmfit,
add_fit_result)
from tab_synthesize import synth_mode_select, synth_inputs, input_slider
# slit function settings tab
def make_tab_slit(sigma, k, a_sigma, a_k, sigma_L_l, sigma_L_h, slit):
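    # Builds the slit-function settings tab: shape selector, parameter
    # inputs and a preview graph of the resulting slit profile.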
tab_slit = [
synth_mode_select("Slit function", "slit-addon", "slit-select",
["sGaussian", "sVoigt"],
"Choose the type of slit function",
slit),
dbc.Row(
[
dbc.Col(
[
synth_inputs("sigma", "sigma", sigma),
synth_inputs("k", "k", k),
synth_inputs("sigma_L_l", "sigma_L_l", sigma_L_l),
],
className="tab-col pl-3"
),
dbc.Col(
[
synth_inputs("a_sigma", "a_sigma", a_sigma),
synth_inputs("a_k", "a_k", a_k),
synth_inputs("sigma_L_h", "sigma_L_h", sigma_L_h),
],
className="tab-col pr-3"
),
],
className="mt-2 mb-2"
),
dbc.Spinner(
dcc.Graph(
id="graph-slit-function",
figure=plot_placeholder(280)
),
color="primary"
)
]
return tab_slit
# fit settings tab
def make_tab_fit(sample_length, noise_level, offset):
tab_fit = [
input_slider("Sample length",
"sample_length", sample_length, 60, 240, 20),
dbc.Row(
[
dbc.Col(
[
synth_inputs("noise_level", "noise_level",
noise_level),
],
className="tab-col pl-3"
),
dbc.Col(
[
synth_inputs("offset", "offset", offset),
],
className="tab-col pr-3"
),
],
className="mt-2 mb-2"
),
dbc.Button(
[
dbc.Spinner(html.Div("Start fit", id="fitting-status"),
size="sm")
],
id="start-fit-button", n_clicks=0,
color="primary"
),
dbc.Button(
[
html.Div("Show results"),
],
id="print-report-button", n_clicks=0,
className="ml-2",
color="primary"
),
html.Div(
id="report",
className="mt-2 border-0",
style={"overflow": "auto",
"height": "280px",
"background": "#e5ecf6"}
)
]
return tab_fit
# original signal tab
def make_tab_origin():
tab_origin = [
dbc.RadioItems(
options=[
{"label": "Linear", "value": "Linear"},
{"label": "Log", "value": "Log"},
],
value="Linear",
inline=True,
id="change-y-scale"
),
dbc.Spinner(
dcc.Graph(id="synth-signal", figure=plot_placeholder(400),
className="mt-2"),
color="primary"
),
dbc.Button(
html.I(
title="Reset to default",
className="fas fa-undo-alt ml-0",
style={"font-size": "1.5em"},
),
className="float-right p-0 shadow-none",
color="link",
size="sm",
id="reset-button-fit",
n_clicks=0,
)
]
return tab_origin
# fit signal tab
def make_tab_fitting():
tab_fitting = [
dbc.Row(
[
dbc.RadioItems(
options=[
{"label": "Markers", "value": "markers"},
{"label": "Line", "value": "lines"},
],
value="markers",
inline=True,
id="change-line-style"
),
dbc.Checklist(
options=[
{"label": "Show fit", "value": "Show fit",
"disabled": True},
],
value=[],
id="show-fit-button",
switch=True,
),
]
),
dbc.Spinner(
dcc.Graph(id="fit-signal", figure=plot_placeholder(400),
className="mt-2"),
color="primary"
),
dbc.Button(
html.I(
title="Reset to default",
className="fas fa-undo-alt ml-0",
style={"font-size": "1.5em"},
),
className="float-right p-0 shadow-none",
color="link",
size="sm",
id="reset-button-fit",
n_clicks=0,
)
]
return tab_fitting
# disable input based on slit function
@app.callback(
[
Output("sigma_L_l", "disabled"),
Output("sigma_L_h", "disabled"),
],
Input("slit-select", "value")
)
def disable_slit_input(value):
if value == "sGaussian":
return True, True
else:
return False, False
# plot slit graph
@app.callback(
Output("graph-slit-function", "figure"),
[
Input("memory-settings-slit", "data"),
Input("memory-synth-spectrum", "data"),
],
)
def update_slit_func(parameters, spect_memo):
nu, _ = spect_memo
return plot_slit(nu, parameters)
# update fit settings
@app.callback(
Output("memory-settings-fit", "data"),
[
Input('sample_length', 'value'),
Input('noise_level', 'value'),
Input('offset', 'value'),
],
State("memory-settings-fit", "data"),
)
def update_memory_fit(sample_length, noise_level, offset, data):
data["sample_length"] = float(sample_length)
data["noise_level"] = float(noise_level)
data["offset"] = float(offset)
return data
# reset slit settings
@app.callback(
[
Output('sample_length', 'value'),
Output('noise_level', 'value'),
Output('offset', 'value'),
],
Input('reset-button-fit', 'n_clicks'),
State("memory-settings-fit", "data"),
)
def reset_fit(n, data):
if n > 0:
data = DEFAULT_SETTINGS_FIT
_settings = [data["sample_length"], data["noise_level"], data["offset"]]
return _settings
# update slit settings
@app.callback(
Output("memory-settings-slit", "data"),
[
Input('sigma', 'value'),
Input('a_sigma', 'value'),
Input('k', 'value'),
Input('a_k', 'value'),
Input('sigma_L_l', 'value'),
Input('sigma_L_h', 'value'),
Input('slit-select', 'value')
],
State("memory-settings-slit", "data"),
)
def update_memory_slit(sigma, a_sigma, k, a_k, sigma_L_l, sigma_L_h,
slit_shape, data):
data["sigma"] = float(sigma)
data["a_sigma"] = float(a_sigma)
data["k"] = float(k)
data["a_k"] = float(a_k)
data["sigma_L_l"] = float(sigma_L_l)
data["sigma_L_h"] = float(sigma_L_h)
data["slit"] = slit_shape
return data
# reset slit settings
@app.callback(
[
Output('sigma', 'value'),
Output('a_sigma', 'value'),
Output('k', 'value'),
Output('a_k', 'value'),
Output('sigma_L_l', 'value'),
Output('sigma_L_h', 'value'),
Output('slit-select', 'value')
],
Input('reset-button-fit', 'n_clicks'),
State("memory-settings-slit", "data"),
)
def reset_slit(n, data):
if n > 0:
data = DEFAULT_SETTINGS_SLIT
_settings = [data["sigma"], data["a_sigma"], data["k"], data["a_k"],
data["sigma_L_l"], data["sigma_L_h"], data["slit"]]
return _settings
# reset the reset button n_clicks to 0 when switching between settings tabs
@app.callback(
Output("reset-button-fit", "n_clicks"),
Input("fit-settings", "active_tab"),
)
def re_zero_fit(active_tab):
if active_tab:
return 0
# make the settings tabs always with settings stored in the memories
@app.callback(
Output("fit-settings-card", "children"),
Input("fit-settings", "active_tab"),
State("memory-settings-fit", "data"),
State("memory-settings-slit", "data")
)
def fit_settings_tab_content(active_tab, data_1, data_2):
if active_tab == "fit-settings-1":
return make_tab_fit(**data_1)
if active_tab == "fit-settings-2":
return make_tab_slit(**data_2)
# create fit signal
@app.callback(
Output("memory-fit-signal", "data"),
[
Input("memory-settings-slit", "data"),
Input("memory-synth-spectrum", "data"),
Input("memory-settings-fit", "data"),
Input("memory-settings-models", "data"),
],
)
def update_fit_signal(slit_parameters, spect_memo, fit_settings, data_1,
):
nu, spect = spect_memo
nu_expt, spect_expt, x_range = downsample_synth(
nu, spect, data_1['nu_start'], data_1['nu_end'], **fit_settings,
slit_parameters=slit_parameters)
return [nu_expt, spect_expt, x_range]
# plot fit signal
@app.callback(
Output("fit-signal", "figure"),
[
Input("memory-fit-signal", "data"),
Input("change-line-style", "value"),
],
Input("show-fit-button", "value"),
State("memory-fit-report", "data"),
)
def update_fit_graph(data, mode, show_click, fit_memo):
fig = plot_fitting(*data, mode=mode)
if show_click:
fig = add_fit_result(fig, fit_memo['nu'], fit_memo['best_fit'])
return fig
# create graph tabs
@app.callback(
Output("fit-graph", "children"),
Input("tab-fit-graph", "active_tab"),
)
def update_fit_spectrum(active_tab):
if active_tab == "tab-fit-origin":
return make_tab_origin()
elif active_tab == "tab-fit-signal":
return make_tab_fitting()
# perform a fit
@app.callback(
[
Output("fitting-status", "children"),
Output("memory-fit-report", "data"),
],
Input("start-fit-button", "n_clicks"),
State("memory-fit-signal", "data"),
State("memory-settings-slit", "data"),
State("memory-settings-models", "data"),
State("memory-settings-conditions", "data"),
)
def update_fit(n_clicks, data, slit_parameters, settings_models,
settings_conditions):
fit_result = []
if n_clicks:
fit_result = least_sqrt_fit(
data[0],
data[1],
slit_parameters,
settings_models,
settings_conditions)
fit_result = unpack_lmfit(fit_result)
return "Start fit", fit_result
# update show-fit-button
@app.callback(
Output("show-fit-button", "options"),
Output("show-fit-button", "value"),
Output("show-fit-button", "labelClassName"),
Input("memory-fit-report", "data")
)
def update_show_fit_button(data):
_switch = [{"label": "Show fit", "value": "Show fit", "disabled": False}]
if data:
return _switch, ["Show fit"], "text-primary"
else:
_switch[0]["disabled"] = True
return _switch, [], None
# update show-report-button
@app.callback(
Output("print-report-button", "disabled"),
Input("start-fit-button", "n_clicks"),
Input("fitting-status", "children")
)
def change_button_status(n_clicks, fitting_status):
if n_clicks and fitting_status == "Start fit":
return False
else:
return True
# print report
@app.callback(
Output("report", "children"),
Input("print-report-button", "n_clicks"),
State("memory-fit-report", "data")
)
def show_report(n_clicks, data):
report = ["Fitting results will be shown here"]
if n_clicks and report:
report = [html.P(_row, className="mb-0")
for _row in data["report"].split("\n")]
return report
# settings panels
card_setting = dbc.Col(
dbc.Card(
[
dbc.CardHeader(
dbc.Tabs(
[
dbc.Tab(label="Fit Parameters",
tab_id="fit-settings-1"),
dbc.Tab(label="Slit Function",
tab_id="fit-settings-2"),
],
id="fit-settings",
card=True,
active_tab="fit-settings-1",
),
style={"background-color": "#e9ecef"}
),
dbc.CardBody(
id="fit-settings-card"
),
],
style={"height": "540px"},
className="border-0"
),
xs=12,
md=5,
className="tab-col mb-2",
)
# signal panel
card_fit = dbc.Col(
dbc.Card(
[
dbc.CardHeader(
dbc.Tabs(
[
dbc.Tab(label="Fit Signal",
tab_id="tab-fit-signal"),
dbc.Tab(label="Original Signal",
tab_id="tab-fit-origin"),
],
id="tab-fit-graph",
card=True,
active_tab="tab-fit-signal"
),
style={"background-color": "#e9ecef"}
),
dbc.CardBody(id="fit-graph"),
],
style={"height": "540px"},
className="border-0"
),
xs=12,
md=7,
className="tab-col mb-2"
)
# combine the two cards together
tab_fit = dbc.Row(
[
card_setting,
card_fit
],
className="mb-1",
)
| import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
from app import app
from utils import (DEFAULT_SETTINGS_SLIT, DEFAULT_SETTINGS_FIT,
downsample_synth, plot_fitting,
plot_placeholder, plot_slit, least_sqrt_fit, unpack_lmfit,
add_fit_result)
from tab_synthesize import synth_mode_select, synth_inputs, input_slider
# slit function settings tab
def make_tab_slit(sigma, k, a_sigma, a_k, sigma_L_l, sigma_L_h, slit):
tab_slit = [
synth_mode_select("Slit function", "slit-addon", "slit-select",
["sGaussian", "sVoigt"],
"Choose the type of slit function",
slit),
dbc.Row(
[
dbc.Col(
[
synth_inputs("sigma", "sigma", sigma),
synth_inputs("k", "k", k),
synth_inputs("sigma_L_l", "sigma_L_l", sigma_L_l),
],
className="tab-col pl-3"
),
dbc.Col(
[
synth_inputs("a_sigma", "a_sigma", a_sigma),
synth_inputs("a_k", "a_k", a_k),
synth_inputs("sigma_L_h", "sigma_L_h", sigma_L_h),
],
className="tab-col pr-3"
),
],
className="mt-2 mb-2"
),
dbc.Spinner(
dcc.Graph(
id="graph-slit-function",
figure=plot_placeholder(280)
),
color="primary"
)
]
return tab_slit
# fit settings tab
def make_tab_fit(sample_length, noise_level, offset):
tab_fit = [
input_slider("Sample length",
"sample_length", sample_length, 60, 240, 20),
dbc.Row(
[
dbc.Col(
[
synth_inputs("noise_level", "noise_level",
noise_level),
],
className="tab-col pl-3"
),
dbc.Col(
[
synth_inputs("offset", "offset", offset),
],
className="tab-col pr-3"
),
],
className="mt-2 mb-2"
),
dbc.Button(
[
dbc.Spinner(html.Div("Start fit", id="fitting-status"),
size="sm")
],
id="start-fit-button", n_clicks=0,
color="primary"
),
dbc.Button(
[
html.Div("Show results"),
],
id="print-report-button", n_clicks=0,
className="ml-2",
color="primary"
),
html.Div(
id="report",
className="mt-2 border-0",
style={"overflow": "auto",
"height": "280px",
"background": "#e5ecf6"}
)
]
return tab_fit
# original signal tab
def make_tab_origin():
tab_origin = [
dbc.RadioItems(
options=[
{"label": "Linear", "value": "Linear"},
{"label": "Log", "value": "Log"},
],
value="Linear",
inline=True,
id="change-y-scale"
),
dbc.Spinner(
dcc.Graph(id="synth-signal", figure=plot_placeholder(400),
className="mt-2"),
color="primary"
),
dbc.Button(
html.I(
title="Reset to default",
className="fas fa-undo-alt ml-0",
style={"font-size": "1.5em"},
),
className="float-right p-0 shadow-none",
color="link",
size="sm",
id="reset-button-fit",
n_clicks=0,
)
]
return tab_origin
# fit signal tab
def make_tab_fitting():
tab_fitting = [
dbc.Row(
[
dbc.RadioItems(
options=[
{"label": "Markers", "value": "markers"},
{"label": "Line", "value": "lines"},
],
value="markers",
inline=True,
id="change-line-style"
),
dbc.Checklist(
options=[
{"label": "Show fit", "value": "Show fit",
"disabled": True},
],
value=[],
id="show-fit-button",
switch=True,
),
]
),
dbc.Spinner(
dcc.Graph(id="fit-signal", figure=plot_placeholder(400),
className="mt-2"),
color="primary"
),
dbc.Button(
html.I(
title="Reset to default",
className="fas fa-undo-alt ml-0",
style={"font-size": "1.5em"},
),
className="float-right p-0 shadow-none",
color="link",
size="sm",
id="reset-button-fit",
n_clicks=0,
)
]
return tab_fitting
# disable input based on slit function
@app.callback(
[
Output("sigma_L_l", "disabled"),
Output("sigma_L_h", "disabled"),
],
Input("slit-select", "value")
)
def disable_slit_input(value):
if value == "sGaussian":
return True, True
else:
return False, False
# plot slit graph
@app.callback(
Output("graph-slit-function", "figure"),
[
Input("memory-settings-slit", "data"),
Input("memory-synth-spectrum", "data"),
],
)
def update_slit_func(parameters, spect_memo):
nu, _ = spect_memo
return plot_slit(nu, parameters)
# update fit settings
@app.callback(
Output("memory-settings-fit", "data"),
[
Input('sample_length', 'value'),
Input('noise_level', 'value'),
Input('offset', 'value'),
],
State("memory-settings-fit", "data"),
)
def update_memory_fit(sample_length, noise_level, offset, data):
data["sample_length"] = float(sample_length)
data["noise_level"] = float(noise_level)
data["offset"] = float(offset)
return data
# reset slit settings
@app.callback(
[
Output('sample_length', 'value'),
Output('noise_level', 'value'),
Output('offset', 'value'),
],
Input('reset-button-fit', 'n_clicks'),
State("memory-settings-fit", "data"),
)
def reset_fit(n, data):
if n > 0:
data = DEFAULT_SETTINGS_FIT
_settings = [data["sample_length"], data["noise_level"], data["offset"]]
return _settings
# update slit settings
@app.callback(
Output("memory-settings-slit", "data"),
[
Input('sigma', 'value'),
Input('a_sigma', 'value'),
Input('k', 'value'),
Input('a_k', 'value'),
Input('sigma_L_l', 'value'),
Input('sigma_L_h', 'value'),
Input('slit-select', 'value')
],
State("memory-settings-slit", "data"),
)
def update_memory_slit(sigma, a_sigma, k, a_k, sigma_L_l, sigma_L_h,
slit_shape, data):
data["sigma"] = float(sigma)
data["a_sigma"] = float(a_sigma)
data["k"] = float(k)
data["a_k"] = float(a_k)
data["sigma_L_l"] = float(sigma_L_l)
data["sigma_L_h"] = float(sigma_L_h)
data["slit"] = slit_shape
return data
# reset slit settings
@app.callback(
[
Output('sigma', 'value'),
Output('a_sigma', 'value'),
Output('k', 'value'),
Output('a_k', 'value'),
Output('sigma_L_l', 'value'),
Output('sigma_L_h', 'value'),
Output('slit-select', 'value')
],
Input('reset-button-fit', 'n_clicks'),
State("memory-settings-slit", "data"),
)
def reset_slit(n, data):
if n > 0:
data = DEFAULT_SETTINGS_SLIT
_settings = [data["sigma"], data["a_sigma"], data["k"], data["a_k"],
data["sigma_L_l"], data["sigma_L_h"], data["slit"]]
return _settings
# reset the reset button n_clicks to 0 when switching between settings tabs
@app.callback(
Output("reset-button-fit", "n_clicks"),
Input("fit-settings", "active_tab"),
)
def re_zero_fit(active_tab):
if active_tab:
return 0
# make the settings tabs always with settings stored in the memories
@app.callback(
Output("fit-settings-card", "children"),
Input("fit-settings", "active_tab"),
State("memory-settings-fit", "data"),
State("memory-settings-slit", "data")
)
def fit_settings_tab_content(active_tab, data_1, data_2):
if active_tab == "fit-settings-1":
return make_tab_fit(**data_1)
if active_tab == "fit-settings-2":
return make_tab_slit(**data_2)
# create fit signal
@app.callback(
Output("memory-fit-signal", "data"),
[
Input("memory-settings-slit", "data"),
Input("memory-synth-spectrum", "data"),
Input("memory-settings-fit", "data"),
Input("memory-settings-models", "data"),
],
)
def update_fit_signal(slit_parameters, spect_memo, fit_settings, data_1,
):
nu, spect = spect_memo
nu_expt, spect_expt, x_range = downsample_synth(
nu, spect, data_1['nu_start'], data_1['nu_end'], **fit_settings,
slit_parameters=slit_parameters)
return [nu_expt, spect_expt, x_range]
# plot fit signal
@app.callback(
Output("fit-signal", "figure"),
[
Input("memory-fit-signal", "data"),
Input("change-line-style", "value"),
],
Input("show-fit-button", "value"),
State("memory-fit-report", "data"),
)
def update_fit_graph(data, mode, show_click, fit_memo):
fig = plot_fitting(*data, mode=mode)
if show_click:
fig = add_fit_result(fig, fit_memo['nu'], fit_memo['best_fit'])
return fig
# createa graph tabs
@app.callback(
Output("fit-graph", "children"),
Input("tab-fit-graph", "active_tab"),
)
def update_fit_spectrum(active_tab):
if active_tab == "tab-fit-origin":
return make_tab_origin()
elif active_tab == "tab-fit-signal":
return make_tab_fitting()
# perform a fit
@app.callback(
[
Output("fitting-status", "children"),
Output("memory-fit-report", "data"),
],
Input("start-fit-button", "n_clicks"),
State("memory-fit-signal", "data"),
State("memory-settings-slit", "data"),
State("memory-settings-models", "data"),
State("memory-settings-conditions", "data"),
)
def update_fit(n_clicks, data, slit_parameters, settings_models,
settings_conditions):
fit_result = []
if n_clicks:
fit_result = least_sqrt_fit(
data[0],
data[1],
slit_parameters,
settings_models,
settings_conditions)
fit_result = unpack_lmfit(fit_result)
return "Start fit", fit_result
# update show-fit-button
@app.callback(
Output("show-fit-button", "options"),
Output("show-fit-button", "value"),
Output("show-fit-button", "labelClassName"),
Input("memory-fit-report", "data")
)
def update_show_fit_button(data):
_switch = [{"label": "Show fit", "value": "Show fit", "disabled": False}]
if data:
return _switch, ["Show fit"], "text-primary"
else:
_switch[0]["disabled"] = True
return _switch, [], None
# update show-report-button
@app.callback(
Output("print-report-button", "disabled"),
Input("start-fit-button", "n_clicks"),
Input("fitting-status", "children")
)
def change_button_status(n_clicks, fitting_status):
if n_clicks and fitting_status == "Start fit":
return False
else:
return True
# print report
@app.callback(
Output("report", "children"),
Input("print-report-button", "n_clicks"),
State("memory-fit-report", "data")
)
def show_report(n_clicks, data):
report = ["Fitting results will be shown here"]
if n_clicks and report:
report = [html.P(_row, className="mb-0")
for _row in data["report"].split("\n")]
return report
# settings panels
card_setting = dbc.Col(
dbc.Card(
[
dbc.CardHeader(
dbc.Tabs(
[
dbc.Tab(label="Fit Parameters",
tab_id="fit-settings-1"),
dbc.Tab(label="Slit Function",
tab_id="fit-settings-2"),
],
id="fit-settings",
card=True,
active_tab="fit-settings-1",
),
style={"background-color": "#e9ecef"}
),
dbc.CardBody(
id="fit-settings-card"
),
],
style={"height": "540px"},
className="border-0"
),
xs=12,
md=5,
className="tab-col mb-2",
)
# signal panel
card_fit = dbc.Col(
dbc.Card(
[
dbc.CardHeader(
dbc.Tabs(
[
dbc.Tab(label="Fit Signal",
tab_id="tab-fit-signal"),
dbc.Tab(label="Original Signal",
tab_id="tab-fit-origin"),
],
id="tab-fit-graph",
card=True,
active_tab="tab-fit-signal"
),
style={"background-color": "#e9ecef"}
),
dbc.CardBody(id="fit-graph"),
],
style={"height": "540px"},
className="border-0"
),
xs=12,
md=7,
className="tab-col mb-2"
)
# combine the two cards together
tab_fit = dbc.Row(
[
card_setting,
card_fit
],
className="mb-1",
)
| en | 0.659078 | # slit function settings tab # fit settings tab # original signal tab # fit signal tab # disable input based on slit function # plot slit graph # update fit settings # reset slit settings # update slit settings # reset slit settings # reset the reset button n_clicks to 0 when switching between settings tabs # make the settings tabs always with settings stored in the memories # create fit signal # plot fit signal # createa graph tabs # perform a fit # update show-fit-button # update show-report-button # print report # settings panels # signal panel # combine the two cards together | 2.175406 | 2 |
tensorflow_examples/lite/model_maker/core/data_util/recommendation_dataloader.py | tariapper/examples | 0 | 6630073 | <reponame>tariapper/examples
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recommendation dataloader class."""
import json
import os
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import file_util
from tensorflow_examples.lite.model_maker.core.data_util import dataloader
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.data import example_generation_movielens as _gen
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher_keras as _launcher
class RecommendationDataLoader(dataloader.DataLoader):
"""Recommendation data loader."""
def __init__(self, dataset, size, vocab_file):
"""Init data loader.
Dataset is tf.data.Dataset of examples, containing:
for inputs:
- 'context': int64[], context ids as the input of variable length.
for outputs:
- 'label': int64[1], label id to predict.
where context is controlled by `max_context_length` in generating examples.
The vocab file should be json format of: a list of list[size=4], where the 4
elements are ordered as:
[id=int, title=str, genres=str joined with '|', count=int]
Args:
dataset: tf.data.Dataset for recommendation.
size: int, dataset size.
vocab_file: str, vocab file in json format.
"""
super(RecommendationDataLoader, self).__init__(dataset, size)
self.vocab_file = vocab_file
def gen_dataset(self,
batch_size=1,
is_training=False,
shuffle=False,
input_pipeline_context=None,
preprocess=None,
drop_remainder=True):
"""Generates dataset, and overwrites default drop_remainder = True."""
return super(RecommendationDataLoader, self).gen_dataset(
batch_size=batch_size,
is_training=is_training,
shuffle=shuffle,
input_pipeline_context=input_pipeline_context,
preprocess=preprocess,
drop_remainder=drop_remainder,
)
def split(self, fraction):
return self._split(fraction, self.vocab_file)
def load_vocab_and_item_size(self):
"""Loads vocab from file.
The vocab file should be json format of: a list of list[size=4], where the 4
elements are ordered as:
[id=int, title=str, genres=str joined with '|', count=int]
It is generated when preparing movielens dataset.
Returns:
vocab list: a list of vocab dict representing movies
{
'id': int,
'title': str,
'genres': list of str,
'count': int,
}
item size: int, the max id of all vocab.
"""
with tf.io.gfile.GFile(self.vocab_file) as f:
vocab_json = json.load(f)
vocab = []
for v in vocab_json:
vocab.append({
'id': v[0],
'title': v[1],
'genres': v[2].split('|'),
'count': v[3],
})
item_size = max((v['id'] for v in vocab))
return vocab, item_size
@staticmethod
def read_as_dataset(filepattern):
"""Reads file pattern as dataset."""
dataset = _launcher.InputFn.read_dataset(filepattern)
return dataset.map(
_launcher.InputFn.decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
@classmethod
def _prepare_movielens_datasets(cls,
raw_data_dir,
generated_dir,
train_filename,
test_filename,
vocab_filename,
meta_filename,
min_timeline_length=3,
max_context_length=10,
build_movie_vocab=True):
"""Prepare movielens datasets, and returns a dict contains meta."""
train_file = os.path.join(generated_dir, train_filename)
test_file = os.path.join(generated_dir, test_filename)
meta_file = os.path.join(generated_dir, meta_filename)
# Create dataset and meta, only if they are not existed.
if not all([os.path.exists(f) for f in (train_file, test_file, meta_file)]):
stats = _gen.generate_datasets(
data_dir=raw_data_dir,
output_dir=generated_dir,
min_timeline_length=min_timeline_length,
max_context_length=max_context_length,
build_movie_vocab=build_movie_vocab,
train_filename=train_filename,
test_filename=test_filename,
vocab_filename=vocab_filename,
)
file_util.write_json_file(meta_file, stats)
meta = file_util.load_json_file(meta_file)
return meta
@classmethod
def from_movielens(cls,
generated_dir,
data_tag,
raw_data_dir,
min_timeline_length=3,
max_context_length=10,
build_movie_vocab=True,
train_filename='train_movielens_1m.tfrecord',
test_filename='test_movielens_1m.tfrecord',
vocab_filename='movie_vocab.json',
meta_filename='meta.json'):
"""Generates data loader from movielens dataset.
The method downloads and prepares dataset, then generates for train/eval.
For `movielens` data format, see:
- function `_generate_fake_data` in `recommendation_testutil.py`
- Or, zip file: http://files.grouplens.org/datasets/movielens/ml-1m.zip
Args:
generated_dir: str, path to generate preprocessed examples.
data_tag: str, specify dataset in {'train', 'test'}.
raw_data_dir: str, path to download raw data, and unzip.
min_timeline_length: int, min timeline length to split train/eval set.
max_context_length: int, max context length as the input.
build_movie_vocab: boolean, whether to build movie vocab.
train_filename: str, generated file name for training data.
test_filename: str, generated file name for test data.
vocab_filename: str, generated file name for vocab data.
meta_filename: str, generated file name for meta data.
Returns:
Data Loader.
"""
if data_tag not in ('train', 'test'):
raise ValueError(
'Expected data_tag is train or test, but got {}'.format(data_tag))
meta = cls._prepare_movielens_datasets(
raw_data_dir,
generated_dir,
train_filename=train_filename,
test_filename=test_filename,
vocab_filename=vocab_filename,
meta_filename=meta_filename,
min_timeline_length=min_timeline_length,
max_context_length=max_context_length,
build_movie_vocab=build_movie_vocab)
if data_tag == 'train':
ds = cls.read_as_dataset(meta['train_file'])
return cls(ds, meta['train_size'], meta['vocab_file'])
elif data_tag == 'test':
ds = cls.read_as_dataset(meta['test_file'])
return cls(ds, meta['test_size'], meta['vocab_file'])
| # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recommendation dataloader class."""
import json
import os
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import file_util
from tensorflow_examples.lite.model_maker.core.data_util import dataloader
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.data import example_generation_movielens as _gen
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher_keras as _launcher
class RecommendationDataLoader(dataloader.DataLoader):
"""Recommendation data loader."""
def __init__(self, dataset, size, vocab_file):
"""Init data loader.
Dataset is tf.data.Dataset of examples, containing:
for inputs:
- 'context': int64[], context ids as the input of variable length.
for outputs:
- 'label': int64[1], label id to predict.
where context is controlled by `max_context_length` in generating examples.
The vocab file should be json format of: a list of list[size=4], where the 4
elements are ordered as:
[id=int, title=str, genres=str joined with '|', count=int]
Args:
dataset: tf.data.Dataset for recommendation.
size: int, dataset size.
vocab_file: str, vocab file in json format.
"""
super(RecommendationDataLoader, self).__init__(dataset, size)
self.vocab_file = vocab_file
def gen_dataset(self,
batch_size=1,
is_training=False,
shuffle=False,
input_pipeline_context=None,
preprocess=None,
drop_remainder=True):
"""Generates dataset, and overwrites default drop_remainder = True."""
return super(RecommendationDataLoader, self).gen_dataset(
batch_size=batch_size,
is_training=is_training,
shuffle=shuffle,
input_pipeline_context=input_pipeline_context,
preprocess=preprocess,
drop_remainder=drop_remainder,
)
def split(self, fraction):
return self._split(fraction, self.vocab_file)
def load_vocab_and_item_size(self):
"""Loads vocab from file.
The vocab file should be json format of: a list of list[size=4], where the 4
elements are ordered as:
[id=int, title=str, genres=str joined with '|', count=int]
It is generated when preparing movielens dataset.
Returns:
vocab list: a list of vocab dict representing movies
{
'id': int,
'title': str,
'genres': list of str,
'count': int,
}
item size: int, the max id of all vocab.
"""
with tf.io.gfile.GFile(self.vocab_file) as f:
vocab_json = json.load(f)
vocab = []
for v in vocab_json:
vocab.append({
'id': v[0],
'title': v[1],
'genres': v[2].split('|'),
'count': v[3],
})
item_size = max((v['id'] for v in vocab))
return vocab, item_size
@staticmethod
def read_as_dataset(filepattern):
"""Reads file pattern as dataset."""
dataset = _launcher.InputFn.read_dataset(filepattern)
return dataset.map(
_launcher.InputFn.decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
@classmethod
def _prepare_movielens_datasets(cls,
raw_data_dir,
generated_dir,
train_filename,
test_filename,
vocab_filename,
meta_filename,
min_timeline_length=3,
max_context_length=10,
build_movie_vocab=True):
"""Prepare movielens datasets, and returns a dict contains meta."""
train_file = os.path.join(generated_dir, train_filename)
test_file = os.path.join(generated_dir, test_filename)
meta_file = os.path.join(generated_dir, meta_filename)
# Create dataset and meta, only if they are not existed.
if not all([os.path.exists(f) for f in (train_file, test_file, meta_file)]):
stats = _gen.generate_datasets(
data_dir=raw_data_dir,
output_dir=generated_dir,
min_timeline_length=min_timeline_length,
max_context_length=max_context_length,
build_movie_vocab=build_movie_vocab,
train_filename=train_filename,
test_filename=test_filename,
vocab_filename=vocab_filename,
)
file_util.write_json_file(meta_file, stats)
meta = file_util.load_json_file(meta_file)
return meta
@classmethod
def from_movielens(cls,
generated_dir,
data_tag,
raw_data_dir,
min_timeline_length=3,
max_context_length=10,
build_movie_vocab=True,
train_filename='train_movielens_1m.tfrecord',
test_filename='test_movielens_1m.tfrecord',
vocab_filename='movie_vocab.json',
meta_filename='meta.json'):
"""Generates data loader from movielens dataset.
The method downloads and prepares dataset, then generates for train/eval.
For `movielens` data format, see:
- function `_generate_fake_data` in `recommendation_testutil.py`
- Or, zip file: http://files.grouplens.org/datasets/movielens/ml-1m.zip
Args:
generated_dir: str, path to generate preprocessed examples.
data_tag: str, specify dataset in {'train', 'test'}.
raw_data_dir: str, path to download raw data, and unzip.
min_timeline_length: int, min timeline length to split train/eval set.
max_context_length: int, max context length as the input.
build_movie_vocab: boolean, whether to build movie vocab.
train_filename: str, generated file name for training data.
test_filename: str, generated file name for test data.
vocab_filename: str, generated file name for vocab data.
meta_filename: str, generated file name for meta data.
Returns:
Data Loader.
"""
if data_tag not in ('train', 'test'):
raise ValueError(
'Expected data_tag is train or test, but got {}'.format(data_tag))
meta = cls._prepare_movielens_datasets(
raw_data_dir,
generated_dir,
train_filename=train_filename,
test_filename=test_filename,
vocab_filename=vocab_filename,
meta_filename=meta_filename,
min_timeline_length=min_timeline_length,
max_context_length=max_context_length,
build_movie_vocab=build_movie_vocab)
if data_tag == 'train':
ds = cls.read_as_dataset(meta['train_file'])
return cls(ds, meta['train_size'], meta['vocab_file'])
elif data_tag == 'test':
ds = cls.read_as_dataset(meta['test_file'])
return cls(ds, meta['test_size'], meta['vocab_file']) | en | 0.667796 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Recommendation dataloader class. Recommendation data loader. Init data loader. Dataset is tf.data.Dataset of examples, containing: for inputs: - 'context': int64[], context ids as the input of variable length. for outputs: - 'label': int64[1], label id to predict. where context is controlled by `max_context_length` in generating examples. The vocab file should be json format of: a list of list[size=4], where the 4 elements are ordered as: [id=int, title=str, genres=str joined with '|', count=int] Args: dataset: tf.data.Dataset for recommendation. size: int, dataset size. vocab_file: str, vocab file in json format. Generates dataset, and overwrites default drop_remainder = True. Loads vocab from file. The vocab file should be json format of: a list of list[size=4], where the 4 elements are ordered as: [id=int, title=str, genres=str joined with '|', count=int] It is generated when preparing movielens dataset. Returns: vocab list: a list of vocab dict representing movies { 'id': int, 'title': str, 'genres': list of str, 'count': int, } item size: int, the max id of all vocab. Reads file pattern as dataset. Prepare movielens datasets, and returns a dict contains meta. # Create dataset and meta, only if they are not existed. Generates data loader from movielens dataset. The method downloads and prepares dataset, then generates for train/eval. For `movielens` data format, see: - function `_generate_fake_data` in `recommendation_testutil.py` - Or, zip file: http://files.grouplens.org/datasets/movielens/ml-1m.zip Args: generated_dir: str, path to generate preprocessed examples. data_tag: str, specify dataset in {'train', 'test'}. raw_data_dir: str, path to download raw data, and unzip. min_timeline_length: int, min timeline length to split train/eval set. max_context_length: int, max context length as the input. build_movie_vocab: boolean, whether to build movie vocab. train_filename: str, generated file name for training data. test_filename: str, generated file name for test data. vocab_filename: str, generated file name for vocab data. meta_filename: str, generated file name for meta data. Returns: Data Loader. | 2.346763 | 2 |
wizard.py | DeXtreme/project-wizard | 1 | 6630074 | import argparse
import os
import subprocess
from pathlib import Path
from distutils import dir_util
class Wizard:
def __init__(self,
project_dir,
frontends,
project_name = None,
git_repo = None,
backend = "api") -> None:
"""Initializes project files and directories
Parameters
----------
project_dir : str
The path to create the project directory at
frontends : [str, ...]
A list of the names of the frontend projects
project_name : str
The name of the project directory. If omitted the name of
github repo will be used instead
git_repo: str
The URL of the github repo to be cloned
backend : str
The name of the backend project
"""
self.project_dir = project_dir
self.project_name = project_name
self.git_repo = git_repo
self.frontends = frontends
self.backend = backend
os.chdir(self.project_dir)
def clone_git(self):
# Clone the git repo
print(f"==== Cloning {self.git_repo} repository ====")
subprocess.run(["git", "clone", self.git_repo], check=True)
self.project_name = self.git_repo.split("/")[-1]
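        # Note: if the URL ends with ".git", the derived folder name keeps
        # that suffix; callers may want to strip it.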
def create_project(self):
# Create the project folder
print(f"==== Creating {self.project_name} project folder ====")
dir_util.mkpath(self.project_name)
def create_main_dir(self):
# Create the frontend and backend subdirectories
print("==== Creating main directories ====")
dir_util.mkpath(f"./{self.project_name}/Frontend")
dir_util.mkpath(f"./{self.project_name}/Backend")
def create_dockerfiles(self, path):
# Create test,staging and production dockerfiles at `path`
Path(f"{path}/dockerfile.test").touch()
Path(f"{path}/dockerfile.staging").touch()
Path(f"{path}/dockerfile.prod").touch()
def create_env_files(self, name):
# Create .env files
Path(f"./{self.project_name}/.env/{name}.test.env").touch()
Path(f"./{self.project_name}/.env/{name}.staging.env").touch()
Path(f"./{self.project_name}/.env/{name}.prod.env").touch()
def create_frontends(self):
#Create frontend webapps
print("==== Creating frontends ====")
subprocess.run(f"npx create-react-app ./{self.project_name}/Frontend/tmp", check=True, shell=True)
for app in self.frontends:
print(f".... Creating {app} app ....")
dir_util.copy_tree(f"./{self.project_name}/Frontend/tmp", f"./{self.project_name}/Frontend/{app}")
self.create_dockerfiles(f"./{self.project_name}/Frontend/{app}")
dir_util.remove_tree(f"./{self.project_name}/Frontend/tmp")
def create_backend(self):
#Create backend with a docker container
print(f"==== Creating {self.backend} backend ====")
cwd = os.getcwd()
subprocess.run(["docker", "run", "--rm", "-v",
f"{cwd}/{self.project_name}/Backend:/api",
"python:3.7-alpine",
"sh", "-c",
f"pip install django;cd /api;django-admin startproject {self.backend} ."],check=True)
self.create_dockerfiles(f"./{self.project_name}/Backend")
def create_env(self):
# Create .env folder and files
print("==== Creating .env folder ====")
dir_util.mkpath(f"./{self.project_name}/.env")
for app in self.frontends:
print(f".... Creating {app} app .env files ....")
self.create_env_files(app)
print(f".... Creating {self.backend} .env files ....")
self.create_env_files(self.backend)
def create_docker_compose_files(self):
# Create docker compose file
print("==== Creating docker-compose files ====")
Path(f"./{self.project_name}/docker-compose.test.yml").touch()
Path(f"./{self.project_name}/docker-compose.staging.yml").touch()
Path(f"./{self.project_name}/docker-compose.prod.yml").touch()
def __call__(self):
try:
if self.project_name:
self.create_project()
else:
self.clone_git()
self.create_main_dir()
self.create_frontends()
self.create_backend()
self.create_env()
self.create_docker_compose_files()
print("#*#* HAPPY CODING *#*#")
except Exception as e:
print(e)
parser = argparse.ArgumentParser(description="Initializes fullstack web project files and directories")
parser.add_argument("path", nargs="?", default="./", help="Project path")
parser.add_argument('apps', nargs="+", help="Frontend project names")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-p","--project", help="Project name")
group.add_argument("-g","--git", help="GitHub repository")
parser.add_argument("-b","--api", default="api", help="Backend project name")
args = parser.parse_args()
instance = Wizard(args.path,
args.apps,
args.project,
args.git,
args.api)
instance() | import argparse
import os
import subprocess
from pathlib import Path
from distutils import dir_util
class Wizard:
def __init__(self,
project_dir,
frontends,
project_name = None,
git_repo = None,
backend = "api") -> None:
"""Initializes project files and directories
Parameters
----------
project_dir : str
The path to create the project directory at
frontends : [str, ...]
A list of the names of the frontend projects
project_name : str
The name of the project directory. If omitted the name of
github repo will be used instead
git_repo: str
The URL of the github repo to be cloned
backend : str
The name of the backend project
"""
self.project_dir = project_dir
self.project_name = project_name
self.git_repo = git_repo
self.frontends = frontends
self.backend = backend
os.chdir(self.project_dir)
def clone_git(self):
# Clone the git repo
print(f"==== Cloning {self.git_repo} repository ====")
subprocess.run(["git", "clone", self.git_repo], check=True)
self.project_name = self.git_repo.split("/")[-1]
def create_project(self):
# Create the project folder
print(f"==== Creating {self.project_name} project folder ====")
dir_util.mkpath(self.project_name)
def create_main_dir(self):
# Create the frontend and backend subdirectories
print("==== Creating main directories ====")
dir_util.mkpath(f"./{self.project_name}/Frontend")
dir_util.mkpath(f"./{self.project_name}/Backend")
def create_dockerfiles(self, path):
# Create test,staging and production dockerfiles at `path`
Path(f"{path}/dockerfile.test").touch()
Path(f"{path}/dockerfile.staging").touch()
Path(f"{path}/dockerfile.prod").touch()
def create_env_files(self, name):
# Create .env files
Path(f"./{self.project_name}/.env/{name}.test.env").touch()
Path(f"./{self.project_name}/.env/{name}.staging.env").touch()
Path(f"./{self.project_name}/.env/{name}.prod.env").touch()
def create_frontends(self):
#Create frontend webapps
print("==== Creating frontends ====")
subprocess.run(f"npx create-react-app ./{self.project_name}/Frontend/tmp", check=True, shell=True)
for app in self.frontends:
print(f".... Creating {app} app ....")
dir_util.copy_tree(f"./{self.project_name}/Frontend/tmp", f"./{self.project_name}/Frontend/{app}")
self.create_dockerfiles(f"./{self.project_name}/Frontend/{app}")
dir_util.remove_tree(f"./{self.project_name}/Frontend/tmp")
def create_backend(self):
#Create backend with a docker container
print(f"==== Creating {self.backend} backend ====")
cwd = os.getcwd()
subprocess.run(["docker", "run", "--rm", "-v",
f"{cwd}/{self.project_name}/Backend:/api",
"python:3.7-alpine",
"sh", "-c",
f"pip install django;cd /api;django-admin startproject {self.backend} ."],check=True)
self.create_dockerfiles(f"./{self.project_name}/Backend")
def create_env(self):
# Create .env folder and files
print("==== Creating .env folder ====")
dir_util.mkpath(f"./{self.project_name}/.env")
for app in self.frontends:
print(f".... Creating {app} app .env files ....")
self.create_env_files(app)
print(f".... Creating {self.backend} .env files ....")
self.create_env_files(self.backend)
def create_docker_compose_files(self):
# Create docker compose file
print("==== Creating docker-compose files ====")
Path(f"./{self.project_name}/docker-compose.test.yml").touch()
Path(f"./{self.project_name}/docker-compose.staging.yml").touch()
Path(f"./{self.project_name}/docker-compose.prod.yml").touch()
def __call__(self):
try:
if self.project_name:
self.create_project()
else:
self.clone_git()
self.create_main_dir()
self.create_frontends()
self.create_backend()
self.create_env()
self.create_docker_compose_files()
print("#*#* HAPPY CODING *#*#")
except Exception as e:
print(e)
parser = argparse.ArgumentParser(description="Initializes fullstack web project files and directories")
parser.add_argument("path", nargs="?", default="./", help="Project path")
parser.add_argument('apps', nargs="+", help="Frontend project names")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-p","--project", help="Project name")
group.add_argument("-g","--git", help="GitHub repository")
parser.add_argument("-b","--api", default="api", help="Backend project name")
args = parser.parse_args()
instance = Wizard(args.path,
args.apps,
args.project,
args.git,
args.api)
instance() | en | 0.582694 | Initializes project files and directories Parameters ---------- project_dir : str The path to create the project directory at frontends : [str, ...] A list of the names of the frontend projects project_name : str The name of the project directory. If omitted the name of github repo will be used instead git_repo: str The URL of the github repo to be cloned backend : str The name of the backend project # Clone the git repo # Create the project folder # Create the frontend and backend subdirectories # Create test,staging and production dockerfiles at `path` # Create .env files #Create frontend webapps #Create backend with a docker container # Create .env folder and files # Create docker compose file #* HAPPY CODING *#*#") | 2.837532 | 3 |
RenderingEngine.py | Nathcat/3d-Rendering-Engine | 0 | 6630075 | <filename>RenderingEngine.py
from Camera import *
class Engine:
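    # Minimal scene wrapper: holds the vertex list and the Camera that
    # projects and renders it.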
def __init__(self, vertices, camera_position):
self.vertices = vertices
self.camera = Camera(camera_position)
self.version = "0.1"
def render(self):
self.camera.render(self.vertices)
def __str__(self):
return f"3D Rendering Engine by Nathcat, Version: {self.version}, Vertices: {self.vertices}, Camera: {self.camera}"
| <filename>RenderingEngine.py
from Camera import *
class Engine:
def __init__(self, vertices, camera_position):
self.vertices = vertices
self.camera = Camera(camera_position)
self.version = "0.1"
def render(self):
self.camera.render(self.vertices)
def __str__(self):
return f"3D Rendering Engine by Nathcat, Version: {self.version}, Vertices: {self.vertices}, Camera: {self.camera}"
| none | 1 | 2.802995 | 3 |
|
trace_simexp/_version.py | damar-wicaksono/trace-simexp | 0 | 6630076 | <filename>trace_simexp/_version.py
# -*- coding: utf-8 -*-
"""
trace_simexp._version
*********************
Module with version number unified across project, used in the module,
setup.py, and other command line interfaces.
"""
__version__ = "0.5.0"
| <filename>trace_simexp/_version.py
# -*- coding: utf-8 -*-
"""
trace_simexp._version
*********************
Module with version number unified across project, used in the module,
setup.py, and other command line interfaces.
"""
__version__ = "0.5.0"
| en | 0.684984 | # -*- coding: utf-8 -*- trace_simexp._version ********************* Module with version number unified across project, used in the module, setup.py, and other command line interfaces. | 0.981718 | 1 |
programmers/lv1/12930.py | KLumy/Basic-Algorithm | 1 | 6630077 | def solution(s: str):
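    # Alternate upper/lower case per character, restarting the index at the
    # start of every word (spaces are preserved by the final join).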
words = s.split(" ")
answer = []
for word in words:
chars = list(word)
for i in range(len(chars)):
if i % 2 == 0:
chars[i] = chars[i].upper()
else:
chars[i] = chars[i].lower()
answer.append("".join(chars))
return " ".join(answer)
# return " ".join(
# map(
# lambda x: "".join(
# [a.lower() if i % 2 else a.upper() for i, a in enumerate(x)]
# ),
# s.split(" "),
# )
# )
if __name__ == "__main__":
i = "try hello world"
print(solution(i)) | def solution(s: str):
words = s.split(" ")
answer = []
for word in words:
chars = list(word)
for i in range(len(chars)):
if i % 2 == 0:
chars[i] = chars[i].upper()
else:
chars[i] = chars[i].lower()
answer.append("".join(chars))
return " ".join(answer)
# return " ".join(
# map(
# lambda x: "".join(
# [a.lower() if i % 2 else a.upper() for i, a in enumerate(x)]
# ),
# s.split(" "),
# )
# )
if __name__ == "__main__":
i = "try hello world"
print(solution(i)) | en | 0.113682 | # return " ".join( # map( # lambda x: "".join( # [a.lower() if i % 2 else a.upper() for i, a in enumerate(x)] # ), # s.split(" "), # ) # ) | 3.745451 | 4 |
QCPU_Setup/DWave-library/dist-packages/PySMT-0.7.0-py3.7.egg/pysmt/test/test_regressions.py | cogrpar/qcpuWARE | 1 | 6630078 | #
# This file is part of pySMT.
#
# Copyright 2014 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from six.moves import xrange
from six.moves import cStringIO
import pysmt.logics as logics
import pysmt.smtlib.commands as smtcmd
from pysmt.shortcuts import (Real, Plus, Symbol, Equals, And, Bool, Or, Not,
Div, LT, LE, Int, ToReal, Iff, Exists, Times, FALSE,
BVLShr, BVLShl, BVAShr, BV, BVAdd, BVULT, BVMul,
Select, Array)
from pysmt.shortcuts import Solver, get_env, qelim, get_model, TRUE, ExactlyOne
from pysmt.typing import REAL, BOOL, INT, BVType, FunctionType, ArrayType
from pysmt.test import (TestCase, skipIfSolverNotAvailable, skipIfNoSolverForLogic,
skipIfNoQEForLogic)
from pysmt.test import main
from pysmt.exceptions import ConvertExpressionError, PysmtValueError
from pysmt.test.examples import get_example_formulae
from pysmt.environment import Environment
from pysmt.rewritings import cnf_as_set
from pysmt.smtlib.parser import SmtLibParser
from pysmt.smtlib.commands import DECLARE_FUN
from pysmt.smtlib.script import SmtLibCommand
from pysmt.logics import get_closer_smtlib_logic
from pysmt.constants import Fraction
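# A minimal usage sketch of the API exercised by these regression tests
# (hypothetical snippet, assuming at least one solver, e.g. MathSAT or Z3,
# is installed):
#
#     from pysmt.shortcuts import Symbol, And, is_sat
#     a, b = Symbol("a"), Symbol("b")
#     assert is_sat(And(a, b))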
class TestRegressions(TestCase):
@skipIfSolverNotAvailable("msat")
@skipIfSolverNotAvailable("z3")
def test_plus_converts_correctly_n_ary_functions(self):
"""Handling of Plus n-ary functionality.
        Regression: only the first two arguments of an n-ary Plus were being
        translated to the solver.
"""
a = Symbol("a", REAL)
b = Symbol("b", REAL)
c = Symbol("c", REAL)
        p1 = Plus(a, Real((1, 6)), b, c)
        p2 = Plus(a, b, c, Real((1, 6)))
self.assertValid(Equals(p1, p2))
self.assertValid(Equals(p1, p2), solver_name='z3')
self.assertValid(Equals(p1, p2), solver_name='msat')
def test_substitute_memoization(self):
a = Symbol("A", BOOL)
b = Symbol("B", BOOL)
f = And(a, b)
g = f.substitute({a:Bool(True)})
h = f.substitute({a:Bool(False)})
self.assertNotEqual(h, g)
@skipIfSolverNotAvailable("msat")
def test_msat_bool_back_conversion(self):
f = Symbol("A")
with Solver(name='msat') as solver:
solver.solve()
val = solver.get_value(Symbol("A"))
self.assertTrue(val.is_bool_constant())
@skipIfSolverNotAvailable("msat")
@skipIfSolverNotAvailable("z3")
def test_conversion_of_fractions_in_z3(self):
self.assertValid(Equals(Real(Fraction(1,9)),
Div(Real(1), Real(9))),
solver_name="msat")
self.assertValid(Equals(Real(Fraction(1,9)),
Div(Real(1), Real(9))),
solver_name="z3")
def test_simplifying_int_plus_changes_type_of_expression(self):
varA = Symbol("At", INT)
varB = Symbol("Bt", INT)
get_type = get_env().stc.get_type
f = Plus(varB, Int(1))
old_type = get_type(f)
f = f.simplify()
new_type = get_type(f)
self.assertEqual(new_type, old_type)
@skipIfNoSolverForLogic(logics.QF_UFLIRA)
def test_nary_operators_in_solver_converter(self):
"""Conversion of n-ary operators was not handled correctly by converters."""
x = Symbol("x")
r = Symbol("p", REAL)
f_and_one = And(x)
f_or_one = Or(x)
f_plus_one = LT(Plus(r), Real(0))
ten_x = [x,x,x,x,x,x,x,x,x,x]
f_and_many = And(ten_x)
f_or_many = Or(ten_x)
f_plus_many = LT(Plus(r,r,r,r,r,r,r,r,r,r,r), Real(0))
for name in get_env().factory.all_solvers(logic=logics.QF_BOOL):
self.assertSat(f_and_one, solver_name=name)
self.assertSat(f_or_one, solver_name=name)
self.assertSat(f_and_many, solver_name=name)
self.assertSat(f_or_many, solver_name=name)
for name in get_env().factory.all_solvers(logic=logics.QF_UFLIRA):
self.assertSat(f_plus_one, solver_name=name)
self.assertSat(f_plus_many, solver_name=name)
def test_dependencies_not_includes_toreal(self):
p = Symbol("p", INT)
r = ToReal(p)
deps = r.get_free_variables()
self.assertIn(p, deps)
self.assertNotIn(r, deps)
def test_infix_notation_wrong_le(self):
p = Symbol("p", INT)
get_env().enable_infix_notation = True
self.assertEqual(LE(p, Int(2)), p <= Int(2))
def test_multiple_declaration_w_same_functiontype(self):
ft1 = FunctionType(REAL, [REAL])
ft2 = FunctionType(REAL, [REAL])
f1 = Symbol("f1", ft1)
# The following raises an exception if not (ft1 == ft2)
# since the same symbol has already been defined with
# a "different" type.
f1 = Symbol("f1", ft2)
@skipIfSolverNotAvailable("z3")
def test_z3_iff(self):
z3 = Solver(name="z3")
conv = z3.converter
x, y = Symbol("x"), Symbol("y")
term = conv.convert(Iff(x, y))
back = conv.back(term)
self.assertEqual(Iff(x, y), back)
@skipIfSolverNotAvailable("msat")
def test_msat_iff(self):
msat = Solver(name="msat")
conv = msat.converter
x, y = Symbol("x"), Symbol("y")
term = conv.convert(Iff(x, y))
back = conv.back(term)
# Mathsat can reorder variables...
self.assertTrue(Iff(x, y) == back or Iff(y, x) == back)
def test_multiple_exit(self):
for sname in get_env().factory.all_solvers():
# Multiple exits should be ignored
s = Solver(name=sname)
s.exit()
s.exit()
self.assertTrue(True)
@skipIfNoQEForLogic(logics.LIA)
def test_lia_qe_requiring_modulus(self):
x = Symbol("x", INT)
y = Symbol("y", INT)
f = Exists([x], Equals(y, Times(x, Int(2))))
with self.assertRaises(ConvertExpressionError):
qelim(f)
try:
qelim(f)
except ConvertExpressionError as ex:
# The modulus operator must be there
self.assertTrue("%2" in str(ex.expression) or \
"int_mod_congr" in str(ex.expression))
@skipIfSolverNotAvailable("msat")
def test_msat_partial_model(self):
msat = Solver(name="msat")
x, y = Symbol("x"), Symbol("y")
msat.add_assertion(x)
c = msat.solve()
self.assertTrue(c)
model = msat.get_model()
self.assertNotIn(y, model)
self.assertIn(x, model)
msat.exit()
@skipIfSolverNotAvailable("z3")
def test_z3_model_iteration(self):
x, y = Symbol("x"), Symbol("y")
m = get_model(And(x, y), solver_name="z3")
self.assertIsNotNone(m)
for _, v in m:
self.assertEqual(v, TRUE())
def test_exactlyone_w_generator(self):
x, y = Symbol("x"), Symbol("y")
elems = [x,y]
f1 = ExactlyOne(elems)
f2 = ExactlyOne(e for e in elems)
self.assertEqual(f1, f2)
def test_determinism(self):
def get_set(env):
mgr = env.formula_manager
r = set(mgr.Symbol("x%d" % i) for i in xrange(1000))
for (f, _, _, _) in get_example_formulae(env):
r |= set([f])
return r
# As first thing on the environment we build the set of formulae
l1 = list(get_set(get_env()))
# We try this ten times...
for _ in xrange(10):
# Do something to screw up memory layout...
for y in (Symbol("y%d" % i) for i in xrange(1000)):
self.assertIsNotNone(y)
with Environment() as new_env:
# As first thing on the environment we build the set of formulae
l_test = list(get_set(new_env))
# The ordering of the sets should be the same...
for i,f in enumerate(l1):
nf = new_env.formula_manager.normalize(f)
self.assertEqual(nf, l_test[i])
def test_is_one(self):
self.assertTrue(Int(1).is_one())
self.assertTrue(Real(1).is_one())
self.assertTrue(Int(0).is_zero())
self.assertTrue(Real(0).is_zero())
def test_cnf_as_set(self):
r = cnf_as_set(Symbol("x"))
self.assertTrue(type(r) == frozenset)
def test_substitute_to_real(self):
p = Symbol("p", INT)
f = LT(ToReal(p), Real(0))
new_f = f.substitute({p: Real(1)}).simplify()
self.assertEqual(new_f, Bool(False))
def test_empty_string_symbol(self):
with self.assertRaises(PysmtValueError):
Symbol("")
def test_smtlib_info_quoting(self):
cmd = SmtLibCommand(smtcmd.SET_INFO, [":source", "This\nis\nmultiline!"])
output = cmd.serialize_to_string()
self.assertEqual(output, "(set-info :source |This\nis\nmultiline!|)")
def test_parse_define_fun(self):
smtlib_input = "(declare-fun z () Bool)"\
"(define-fun .def_1 ((z Bool)) Bool (and z z))"
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
parser.get_script(buffer_)
def test_parse_define_fun_bind(self):
smtlib_input = "(declare-fun y () Bool)"\
"(define-fun .def_1 ((z Bool)) Bool (and z z))"
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
parser.get_script(buffer_)
def test_parse_bvx_var(self):
"""bvX is a valid identifier."""
smtlib_input = """
(declare-fun bv1 () (_ BitVec 8))
(assert (bvult (_ bv0 8) (bvmul (bvadd bv1 (_ bv1 8)) (_ bv5 8))))
(check-sat)"""
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
script = parser.get_script(buffer_)
# Check Parsed result
iscript = iter(script)
cmd = next(iscript)
self.assertEqual(cmd.name, DECLARE_FUN)
bv1 = cmd.args[0]
self.assertEqual(bv1.symbol_type().width, 8)
cmd = next(iscript)
parsed_f = cmd.args[0]
target_f = BVULT(BV(0, 8),
BVMul(BVAdd(bv1, BV(1, 8)), BV(5, 8)))
self.assertEqual(parsed_f, target_f)
def test_simplify_times(self):
a,b = Real(5), Real((1,5))
f = Times(a,b).simplify()
self.assertEqual(f.constant_value(), 1)
@skipIfSolverNotAvailable("yices")
def test_yices_push(self):
with Solver(name="yices") as solver:
solver.add_assertion(FALSE())
res = solver.solve()
self.assertFalse(res)
solver.push()
solver.add_assertion(TRUE())
res = solver.solve()
self.assertFalse(res)
solver.pop()
def test_qf_bool_smt2(self):
# QF_BOOL does not exist in SMT-LIB
# This test is to enforce the consistent choice of QF_UF
close_l = get_closer_smtlib_logic(logics.QF_BOOL)
self.assertEqual(close_l, logics.QF_UF)
# For BOOL we use LRA
close_l = get_closer_smtlib_logic(logics.BOOL)
self.assertEqual(close_l, logics.LRA)
def test_exactly_one_unpacking(self):
s1,s2 = Symbol("x"), Symbol("y")
f1 = ExactlyOne((s for s in [s1,s2]))
f2 = ExactlyOne([s1,s2])
f3 = ExactlyOne(s1,s2)
self.assertEqual(f1,f2)
self.assertEqual(f2,f3)
@skipIfSolverNotAvailable("btor")
def test_btor_bitwidth_bug_in_shift(self):
# (384, 384, 9)
# (x69 >> 1_384)
s = Solver(name="btor")
x69 = Symbol("x69", BVType(384))
# BVLShr
f = BVLShr(x69, BV(1, 384))
c = s.converter.convert(f)
self.assertIsNotNone(c)
# BVLShl
f = BVLShl(x69, BV(1, 384))
c = s.converter.convert(f)
self.assertIsNotNone(c)
# BVAShr
f = BVAShr(x69, BV(1, 384))
c = s.converter.convert(f)
self.assertIsNotNone(c)
@skipIfSolverNotAvailable("btor")
def test_btor_get_non_bool_value(self):
with Solver(name="btor") as s:
x = Symbol("x", BVType(16))
s.add_assertion(Equals(x, BV(1, 16)))
self.assertTrue(s.solve())
self.assertEqual(s.get_value(Equals(x, BV(1, 16))), TRUE())
self.assertEqual(s.get_value(BVAdd(x, BV(1, 16))), BV(2, 16))
@skipIfSolverNotAvailable("btor")
def test_btor_get_array_element(self):
with Solver(name="btor") as s:
x = Symbol("a", ArrayType(BVType(16), BVType(16)))
s.add_assertion(Equals(Select(x, BV(1, 16)), BV(1, 16)))
s.add_assertion(Equals(Select(x, BV(2, 16)), BV(3, 16)))
self.assertTrue(s.solve())
self.assertEqual(s.get_value(Select(x, BV(1, 16))), BV(1, 16))
self.assertIsNotNone(s.get_value(x))
def test_smtlib_define_fun_serialization(self):
smtlib_input = "(define-fun init ((x Bool)) Bool (and x (and x (and x (and x (and x (and x x)))))))"
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
s = parser.get_script(buffer_)
for c in s:
res = c.serialize_to_string(daggify=False)
self.assertEqual(res, smtlib_input)
@skipIfSolverNotAvailable("z3")
def test_z3_nary_back(self):
from z3 import Tactic
r = Symbol("r", REAL)
s = Symbol("s", REAL)
t = Symbol("t", REAL)
f = Equals(Times(r,s,t), Real(0))
with Solver(name="z3") as solver:
z3_f = solver.converter.convert(f)
z3_f = Tactic('simplify')(z3_f).as_expr()
fp = solver.converter.back(z3_f)
self.assertValid(Iff(f, fp), (f, fp))
def test_array_initialization_printing(self):
self.assertEqual(str(Array(INT, Int(0), {Int(1):Int(2)})), "Array{Int, Int}(0)[1 := 2]")
def test_git_version(self):
from pysmt import git_version
v = git_version()
self.assertIsNotNone(v)
parts = v.split("-")
self.assertTrue(len(parts) , 4)
@skipIfSolverNotAvailable("btor")
def test_boolector_assumptions(self):
with Solver(name='btor') as solver:
x = Symbol('x')
y = Symbol('y')
solver.add_assertion(Or(x, y))
solver.solve([Not(x), Not(y)])
btor_notx = solver.converter.convert(Not(x))
btor_noty = solver.converter.convert(Not(y))
self.assertEqual(solver.btor.Failed(btor_notx, btor_noty),
[True, True])
def test_parse_declare_const(self):
smtlib_input = """
(declare-const s Int)
(check-sat)"""
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
script = parser.get_script(buffer_)
self.assertIsNotNone(script)
if __name__ == "__main__":
main()
| #
# This file is part of pySMT.
#
# Copyright 2014 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from six.moves import xrange
from six.moves import cStringIO
import pysmt.logics as logics
import pysmt.smtlib.commands as smtcmd
from pysmt.shortcuts import (Real, Plus, Symbol, Equals, And, Bool, Or, Not,
Div, LT, LE, Int, ToReal, Iff, Exists, Times, FALSE,
BVLShr, BVLShl, BVAShr, BV, BVAdd, BVULT, BVMul,
Select, Array)
from pysmt.shortcuts import Solver, get_env, qelim, get_model, TRUE, ExactlyOne
from pysmt.typing import REAL, BOOL, INT, BVType, FunctionType, ArrayType
from pysmt.test import (TestCase, skipIfSolverNotAvailable, skipIfNoSolverForLogic,
skipIfNoQEForLogic)
from pysmt.test import main
from pysmt.exceptions import ConvertExpressionError, PysmtValueError
from pysmt.test.examples import get_example_formulae
from pysmt.environment import Environment
from pysmt.rewritings import cnf_as_set
from pysmt.smtlib.parser import SmtLibParser
from pysmt.smtlib.commands import DECLARE_FUN
from pysmt.smtlib.script import SmtLibCommand
from pysmt.logics import get_closer_smtlib_logic
from pysmt.constants import Fraction
class TestRegressions(TestCase):
@skipIfSolverNotAvailable("msat")
@skipIfSolverNotAvailable("z3")
def test_plus_converts_correctly_n_ary_functions(self):
"""Handling of Plus n-ary functionality.
Only the first two elements were translated to the solver
"""
a = Symbol("a", REAL)
b = Symbol("b", REAL)
c = Symbol("c", REAL)
p1 = Plus(a, Real((1,6)), b,c,)
p2 = Plus(a, b, c, Real((1,6)))
self.assertValid(Equals(p1, p2))
self.assertValid(Equals(p1, p2), solver_name='z3')
self.assertValid(Equals(p1, p2), solver_name='msat')
def test_substitute_memoization(self):
a = Symbol("A", BOOL)
b = Symbol("B", BOOL)
f = And(a, b)
g = f.substitute({a:Bool(True)})
h = f.substitute({a:Bool(False)})
self.assertNotEqual(h, g)
@skipIfSolverNotAvailable("msat")
def test_msat_bool_back_conversion(self):
f = Symbol("A")
with Solver(name='msat') as solver:
solver.solve()
val = solver.get_value(Symbol("A"))
self.assertTrue(val.is_bool_constant())
@skipIfSolverNotAvailable("msat")
@skipIfSolverNotAvailable("z3")
def test_conversion_of_fractions_in_z3(self):
self.assertValid(Equals(Real(Fraction(1,9)),
Div(Real(1), Real(9))),
solver_name="msat")
self.assertValid(Equals(Real(Fraction(1,9)),
Div(Real(1), Real(9))),
solver_name="z3")
def test_simplifying_int_plus_changes_type_of_expression(self):
varA = Symbol("At", INT)
varB = Symbol("Bt", INT)
get_type = get_env().stc.get_type
f = Plus(varB, Int(1))
old_type = get_type(f)
f = f.simplify()
new_type = get_type(f)
self.assertEqual(new_type, old_type)
@skipIfNoSolverForLogic(logics.QF_UFLIRA)
def test_nary_operators_in_solver_converter(self):
"""Conversion of n-ary operators was not handled correctly by converters."""
x = Symbol("x")
r = Symbol("p", REAL)
f_and_one = And(x)
f_or_one = Or(x)
f_plus_one = LT(Plus(r), Real(0))
ten_x = [x,x,x,x,x,x,x,x,x,x]
f_and_many = And(ten_x)
f_or_many = Or(ten_x)
f_plus_many = LT(Plus(r,r,r,r,r,r,r,r,r,r,r), Real(0))
for name in get_env().factory.all_solvers(logic=logics.QF_BOOL):
self.assertSat(f_and_one, solver_name=name)
self.assertSat(f_or_one, solver_name=name)
self.assertSat(f_and_many, solver_name=name)
self.assertSat(f_or_many, solver_name=name)
for name in get_env().factory.all_solvers(logic=logics.QF_UFLIRA):
self.assertSat(f_plus_one, solver_name=name)
self.assertSat(f_plus_many, solver_name=name)
def test_dependencies_not_includes_toreal(self):
p = Symbol("p", INT)
r = ToReal(p)
deps = r.get_free_variables()
self.assertIn(p, deps)
self.assertNotIn(r, deps)
def test_infix_notation_wrong_le(self):
p = Symbol("p", INT)
get_env().enable_infix_notation = True
self.assertEqual(LE(p, Int(2)), p <= Int(2))
def test_multiple_declaration_w_same_functiontype(self):
ft1 = FunctionType(REAL, [REAL])
ft2 = FunctionType(REAL, [REAL])
f1 = Symbol("f1", ft1)
# The following raises an exception if not (ft1 == ft2)
# since the same symbol has already been defined with
# a "different" type.
f1 = Symbol("f1", ft2)
@skipIfSolverNotAvailable("z3")
def test_z3_iff(self):
z3 = Solver(name="z3")
conv = z3.converter
x, y = Symbol("x"), Symbol("y")
term = conv.convert(Iff(x, y))
back = conv.back(term)
self.assertEqual(Iff(x, y), back)
@skipIfSolverNotAvailable("msat")
def test_msat_iff(self):
msat = Solver(name="msat")
conv = msat.converter
x, y = Symbol("x"), Symbol("y")
term = conv.convert(Iff(x, y))
back = conv.back(term)
# Mathsat can reorder variables...
self.assertTrue(Iff(x, y) == back or Iff(y, x) == back)
def test_multiple_exit(self):
for sname in get_env().factory.all_solvers():
# Multiple exits should be ignored
s = Solver(name=sname)
s.exit()
s.exit()
self.assertTrue(True)
@skipIfNoQEForLogic(logics.LIA)
def test_lia_qe_requiring_modulus(self):
x = Symbol("x", INT)
y = Symbol("y", INT)
f = Exists([x], Equals(y, Times(x, Int(2))))
with self.assertRaises(ConvertExpressionError):
qelim(f)
try:
qelim(f)
except ConvertExpressionError as ex:
# The modulus operator must be there
self.assertTrue("%2" in str(ex.expression) or \
"int_mod_congr" in str(ex.expression))
@skipIfSolverNotAvailable("msat")
def test_msat_partial_model(self):
msat = Solver(name="msat")
x, y = Symbol("x"), Symbol("y")
msat.add_assertion(x)
c = msat.solve()
self.assertTrue(c)
model = msat.get_model()
self.assertNotIn(y, model)
self.assertIn(x, model)
msat.exit()
@skipIfSolverNotAvailable("z3")
def test_z3_model_iteration(self):
x, y = Symbol("x"), Symbol("y")
m = get_model(And(x, y), solver_name="z3")
self.assertIsNotNone(m)
for _, v in m:
self.assertEqual(v, TRUE())
def test_exactlyone_w_generator(self):
x, y = Symbol("x"), Symbol("y")
elems = [x,y]
f1 = ExactlyOne(elems)
f2 = ExactlyOne(e for e in elems)
self.assertEqual(f1, f2)
def test_determinism(self):
def get_set(env):
mgr = env.formula_manager
r = set(mgr.Symbol("x%d" % i) for i in xrange(1000))
for (f, _, _, _) in get_example_formulae(env):
r |= set([f])
return r
# As first thing on the environment we build the set of formulae
l1 = list(get_set(get_env()))
# We try this ten times...
for _ in xrange(10):
# Do something to screw up memory layout...
for y in (Symbol("y%d" % i) for i in xrange(1000)):
self.assertIsNotNone(y)
with Environment() as new_env:
# As first thing on the environment we build the set of formulae
l_test = list(get_set(new_env))
# The ordering of the sets should be the same...
for i,f in enumerate(l1):
nf = new_env.formula_manager.normalize(f)
self.assertEqual(nf, l_test[i])
def test_is_one(self):
self.assertTrue(Int(1).is_one())
self.assertTrue(Real(1).is_one())
self.assertTrue(Int(0).is_zero())
self.assertTrue(Real(0).is_zero())
def test_cnf_as_set(self):
r = cnf_as_set(Symbol("x"))
self.assertTrue(type(r) == frozenset)
def test_substitute_to_real(self):
p = Symbol("p", INT)
f = LT(ToReal(p), Real(0))
new_f = f.substitute({p: Real(1)}).simplify()
self.assertEqual(new_f, Bool(False))
def test_empty_string_symbol(self):
with self.assertRaises(PysmtValueError):
Symbol("")
def test_smtlib_info_quoting(self):
cmd = SmtLibCommand(smtcmd.SET_INFO, [":source", "This\nis\nmultiline!"])
output = cmd.serialize_to_string()
self.assertEqual(output, "(set-info :source |This\nis\nmultiline!|)")
def test_parse_define_fun(self):
smtlib_input = "(declare-fun z () Bool)"\
"(define-fun .def_1 ((z Bool)) Bool (and z z))"
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
parser.get_script(buffer_)
def test_parse_define_fun_bind(self):
smtlib_input = "(declare-fun y () Bool)"\
"(define-fun .def_1 ((z Bool)) Bool (and z z))"
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
parser.get_script(buffer_)
def test_parse_bvx_var(self):
"""bvX is a valid identifier."""
smtlib_input = """
(declare-fun bv1 () (_ BitVec 8))
(assert (bvult (_ bv0 8) (bvmul (bvadd bv1 (_ bv1 8)) (_ bv5 8))))
(check-sat)"""
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
script = parser.get_script(buffer_)
# Check Parsed result
iscript = iter(script)
cmd = next(iscript)
self.assertEqual(cmd.name, DECLARE_FUN)
bv1 = cmd.args[0]
self.assertEqual(bv1.symbol_type().width, 8)
cmd = next(iscript)
parsed_f = cmd.args[0]
target_f = BVULT(BV(0, 8),
BVMul(BVAdd(bv1, BV(1, 8)), BV(5, 8)))
self.assertEqual(parsed_f, target_f)
def test_simplify_times(self):
a,b = Real(5), Real((1,5))
f = Times(a,b).simplify()
self.assertEqual(f.constant_value(), 1)
@skipIfSolverNotAvailable("yices")
def test_yices_push(self):
with Solver(name="yices") as solver:
solver.add_assertion(FALSE())
res = solver.solve()
self.assertFalse(res)
solver.push()
solver.add_assertion(TRUE())
res = solver.solve()
self.assertFalse(res)
solver.pop()
def test_qf_bool_smt2(self):
# QF_BOOL does not exist in SMT-LIB
# This test is to enforce the consistent choice of QF_UF
close_l = get_closer_smtlib_logic(logics.QF_BOOL)
self.assertEqual(close_l, logics.QF_UF)
# For BOOL we use LRA
close_l = get_closer_smtlib_logic(logics.BOOL)
self.assertEqual(close_l, logics.LRA)
def test_exactly_one_unpacking(self):
s1,s2 = Symbol("x"), Symbol("y")
f1 = ExactlyOne((s for s in [s1,s2]))
f2 = ExactlyOne([s1,s2])
f3 = ExactlyOne(s1,s2)
self.assertEqual(f1,f2)
self.assertEqual(f2,f3)
@skipIfSolverNotAvailable("btor")
def test_btor_bitwidth_bug_in_shift(self):
# (384, 384, 9)
# (x69 >> 1_384)
s = Solver(name="btor")
x69 = Symbol("x69", BVType(384))
# BVLShr
f = BVLShr(x69, BV(1, 384))
c = s.converter.convert(f)
self.assertIsNotNone(c)
# BVLShl
f = BVLShl(x69, BV(1, 384))
c = s.converter.convert(f)
self.assertIsNotNone(c)
# BVAShr
f = BVAShr(x69, BV(1, 384))
c = s.converter.convert(f)
self.assertIsNotNone(c)
@skipIfSolverNotAvailable("btor")
def test_btor_get_non_bool_value(self):
with Solver(name="btor") as s:
x = Symbol("x", BVType(16))
s.add_assertion(Equals(x, BV(1, 16)))
self.assertTrue(s.solve())
self.assertEqual(s.get_value(Equals(x, BV(1, 16))), TRUE())
self.assertEqual(s.get_value(BVAdd(x, BV(1, 16))), BV(2, 16))
@skipIfSolverNotAvailable("btor")
def test_btor_get_array_element(self):
with Solver(name="btor") as s:
x = Symbol("a", ArrayType(BVType(16), BVType(16)))
s.add_assertion(Equals(Select(x, BV(1, 16)), BV(1, 16)))
s.add_assertion(Equals(Select(x, BV(2, 16)), BV(3, 16)))
self.assertTrue(s.solve())
self.assertEqual(s.get_value(Select(x, BV(1, 16))), BV(1, 16))
self.assertIsNotNone(s.get_value(x))
def test_smtlib_define_fun_serialization(self):
smtlib_input = "(define-fun init ((x Bool)) Bool (and x (and x (and x (and x (and x (and x x)))))))"
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
s = parser.get_script(buffer_)
for c in s:
res = c.serialize_to_string(daggify=False)
self.assertEqual(res, smtlib_input)
@skipIfSolverNotAvailable("z3")
def test_z3_nary_back(self):
from z3 import Tactic
r = Symbol("r", REAL)
s = Symbol("s", REAL)
t = Symbol("t", REAL)
f = Equals(Times(r,s,t), Real(0))
with Solver(name="z3") as solver:
z3_f = solver.converter.convert(f)
z3_f = Tactic('simplify')(z3_f).as_expr()
fp = solver.converter.back(z3_f)
self.assertValid(Iff(f, fp), (f, fp))
def test_array_initialization_printing(self):
self.assertEqual(str(Array(INT, Int(0), {Int(1):Int(2)})), "Array{Int, Int}(0)[1 := 2]")
def test_git_version(self):
from pysmt import git_version
v = git_version()
self.assertIsNotNone(v)
parts = v.split("-")
self.assertTrue(len(parts) , 4)
@skipIfSolverNotAvailable("btor")
def test_boolector_assumptions(self):
with Solver(name='btor') as solver:
x = Symbol('x')
y = Symbol('y')
solver.add_assertion(Or(x, y))
solver.solve([Not(x), Not(y)])
btor_notx = solver.converter.convert(Not(x))
btor_noty = solver.converter.convert(Not(y))
self.assertEqual(solver.btor.Failed(btor_notx, btor_noty),
[True, True])
def test_parse_declare_const(self):
smtlib_input = """
(declare-const s Int)
(check-sat)"""
parser = SmtLibParser()
buffer_ = cStringIO(smtlib_input)
script = parser.get_script(buffer_)
self.assertIsNotNone(script)
if __name__ == "__main__":
main()
| en | 0.846586 | # # This file is part of pySMT. # # Copyright 2014 <NAME> and <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Handling of Plus n-ary functionality. Only the first two elements were translated to the solver Conversion of n-ary operators was not handled correctly by converters. # The following raises an exception if not (ft1 == ft2) # since the same symbol has already been defined with # a "different" type. # Mathsat can reorder variables... # Multiple exits should be ignored # The modulus operator must be there # As first thing on the environment we build the set of formulae # We try this ten times... # Do something to screw up memory layout... # As first thing on the environment we build the set of formulae # The ordering of the sets should be the same... bvX is a valid identifier. (declare-fun bv1 () (_ BitVec 8)) (assert (bvult (_ bv0 8) (bvmul (bvadd bv1 (_ bv1 8)) (_ bv5 8)))) (check-sat) # Check Parsed result # QF_BOOL does not exist in SMT-LIB # This test is to enforce the consistent choice of QF_UF # For BOOL we use LRA # (384, 384, 9) # (x69 >> 1_384) # BVLShr # BVLShl # BVAShr (declare-const s Int) (check-sat) | 1.667767 | 2 |
settings.py | Laurys2517/cueh-bot | 0 | 6630079 | bot_token = <token> #token
version = '0.3' #version del bot
message_limit=500 | bot_token = <token> #token
version = '0.3' #version del bot
message_limit=500 | ca | 0.136474 | #token #version del bot | 1.096673 | 1 |
skyportal/handlers/api/instrument.py | bparazin/skyportal | 52 | 6630080 | <reponame>bparazin/skyportal
from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from baselayer.log import make_log
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import sessionmaker, scoped_session
from tornado.ioloop import IOLoop
from healpix_alchemy import Tile
from regions import Regions
from astropy import coordinates
from astropy import units as u
import numpy as np
from ..base import BaseHandler
from ...models import (
DBSession,
Instrument,
Telescope,
InstrumentField,
InstrumentFieldTile,
)
from ...enum_types import ALLOWED_BANDPASSES
log = make_log('api/instrument')
Session = scoped_session(sessionmaker(bind=DBSession.session_factory.kw["bind"]))
class InstrumentHandler(BaseHandler):
@permissions(['System admin'])
def post(self):
# See bottom of this file for redoc docstring -- moved it there so that
# it could be made an f-string.
data = self.get_json()
telescope_id = data.get('telescope_id')
telescope = Telescope.get_if_accessible_by(
telescope_id, self.current_user, raise_if_none=True, mode="read"
)
field_data = data.pop("field_data", None)
field_region = data.pop("field_region", None)
schema = Instrument.__schema__()
try:
instrument = schema.load(data)
except ValidationError as exc:
return self.error(
'Invalid/missing parameters: ' f'{exc.normalized_messages()}'
)
existing_instrument = (
Instrument.query_records_accessible_by(
self.current_user,
)
.filter(
Instrument.name == data.get('name'),
Instrument.telescope_id == telescope_id,
)
.first()
)
if existing_instrument is None:
instrument.telescope = telescope
DBSession().add(instrument)
DBSession().commit()
else:
instrument = existing_instrument
if field_data is not None:
if field_region is None:
return self.error('`field_region` is required with field_data')
regions = Regions.parse(field_region, format='ds9')
# run async
IOLoop.current().run_in_executor(
None,
lambda: add_tiles(instrument.id, instrument.name, regions, field_data),
)
return self.success(data={"id": instrument.id})
@auth_or_token
def get(self, instrument_id=None):
"""
---
single:
description: Retrieve an instrument
tags:
- instruments
parameters:
- in: path
name: instrument_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: SingleInstrument
400:
content:
application/json:
schema: Error
multiple:
description: Retrieve all instruments
tags:
- instruments
parameters:
- in: query
name: name
schema:
type: string
description: Filter by name (exact match)
responses:
200:
content:
application/json:
schema: ArrayOfInstruments
400:
content:
application/json:
schema: Error
"""
if instrument_id is not None:
instrument = Instrument.get_if_accessible_by(
int(instrument_id),
self.current_user,
raise_if_none=True,
mode="read",
options=[joinedload(Instrument.fields)],
)
return self.success(data=instrument)
inst_name = self.get_query_argument("name", None)
query = Instrument.query_records_accessible_by(
self.current_user,
mode="read",
options=[
joinedload(Instrument.fields),
],
)
if inst_name is not None:
query = query.filter(Instrument.name == inst_name)
instruments = query.all()
self.verify_and_commit()
return self.success(data=instruments)
@permissions(['System admin'])
def put(self, instrument_id):
"""
---
description: Update instrument
tags:
- instruments
parameters:
- in: path
name: instrument_id
required: true
schema:
type: integer
requestBody:
content:
application/json:
schema: InstrumentNoID
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
data = self.get_json()
data['id'] = int(instrument_id)
# permission check
_ = Instrument.get_if_accessible_by(
int(instrument_id), self.current_user, raise_if_none=True, mode='update'
)
schema = Instrument.__schema__()
try:
schema.load(data, partial=True)
except ValidationError as exc:
return self.error(
'Invalid/missing parameters: ' f'{exc.normalized_messages()}'
)
self.verify_and_commit()
return self.success()
@permissions(['System admin'])
def delete(self, instrument_id):
"""
---
description: Delete an instrument
tags:
- instruments
parameters:
- in: path
name: instrument_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
instrument = Instrument.get_if_accessible_by(
int(instrument_id), self.current_user, raise_if_none=True, mode='update'
)
DBSession().delete(instrument)
self.verify_and_commit()
return self.success()
InstrumentHandler.post.__doc__ = f"""
---
description: Add a new instrument
tags:
- instruments
requestBody:
content:
application/json:
schema:
allOf:
- $ref: "#/components/schemas/InstrumentNoID"
- type: object
properties:
filters:
type: array
items:
type: string
enum: {list(ALLOWED_BANDPASSES)}
description: >-
List of filters on the instrument. If the instrument
has no filters (e.g., because it is a spectrograph),
leave blank or pass the empty list.
default: []
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: integer
description: New instrument ID
400:
content:
application/json:
schema: Error
"""
def add_tiles(instrument_id, instrument_name, regions, field_data):
session = Session()
try:
# Loop over the telescope tiles and create fields for each
skyoffset_frames = coordinates.SkyCoord(
field_data['RA'], field_data['Dec'], unit=u.deg
).skyoffset_frame()
ra = np.array([reg.vertices.ra for reg in regions])
dec = np.array([reg.vertices.dec for reg in regions])
coords = np.stack([ra, dec])
# Copy the tile coordinates such that there is one per field
# in the grid
coords_icrs = coordinates.SkyCoord(
*np.tile(coords[:, np.newaxis, ...], (len(field_data['RA']), 1, 1)),
unit=u.deg,
frame=skyoffset_frames[:, np.newaxis, np.newaxis],
).transform_to(coordinates.ICRS)
for ii, (field_id, ra, dec, coords) in enumerate(
zip(field_data['ID'], field_data['RA'], field_data['Dec'], coords_icrs)
):
contour = {
'properties': {
'instrument': instrument_name,
'field_id': int(field_id),
'ra': ra,
'dec': dec,
},
}
field = InstrumentField(
instrument_id=instrument_id, field_id=int(field_id), contour=contour
)
session.add(field)
session.commit()
tiles = []
for coord in coords:
for hpx in Tile.tiles_from_polygon_skycoord(coord):
tiles.append(
InstrumentFieldTile(
instrument_id=instrument_id,
instrument_field_id=field.id,
healpix=hpx,
)
)
session.add_all(tiles)
session.commit()
return log(f"Successfully generated fields for instrument {instrument_id}")
except Exception as e:
return log(f"Unable to generate fields for instrument {instrument_id}: {e}")
finally:
Session.remove()
| from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from baselayer.log import make_log
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import sessionmaker, scoped_session
from tornado.ioloop import IOLoop
from healpix_alchemy import Tile
from regions import Regions
from astropy import coordinates
from astropy import units as u
import numpy as np
from ..base import BaseHandler
from ...models import (
DBSession,
Instrument,
Telescope,
InstrumentField,
InstrumentFieldTile,
)
from ...enum_types import ALLOWED_BANDPASSES
log = make_log('api/instrument')
Session = scoped_session(sessionmaker(bind=DBSession.session_factory.kw["bind"]))
class InstrumentHandler(BaseHandler):
@permissions(['System admin'])
def post(self):
# See bottom of this file for redoc docstring -- moved it there so that
# it could be made an f-string.
data = self.get_json()
telescope_id = data.get('telescope_id')
telescope = Telescope.get_if_accessible_by(
telescope_id, self.current_user, raise_if_none=True, mode="read"
)
field_data = data.pop("field_data", None)
field_region = data.pop("field_region", None)
schema = Instrument.__schema__()
try:
instrument = schema.load(data)
except ValidationError as exc:
return self.error(
'Invalid/missing parameters: ' f'{exc.normalized_messages()}'
)
existing_instrument = (
Instrument.query_records_accessible_by(
self.current_user,
)
.filter(
Instrument.name == data.get('name'),
Instrument.telescope_id == telescope_id,
)
.first()
)
if existing_instrument is None:
instrument.telescope = telescope
DBSession().add(instrument)
DBSession().commit()
else:
instrument = existing_instrument
if field_data is not None:
if field_region is None:
return self.error('`field_region` is required with field_data')
regions = Regions.parse(field_region, format='ds9')
# run async
IOLoop.current().run_in_executor(
None,
lambda: add_tiles(instrument.id, instrument.name, regions, field_data),
)
return self.success(data={"id": instrument.id})
@auth_or_token
def get(self, instrument_id=None):
"""
---
single:
description: Retrieve an instrument
tags:
- instruments
parameters:
- in: path
name: instrument_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: SingleInstrument
400:
content:
application/json:
schema: Error
multiple:
description: Retrieve all instruments
tags:
- instruments
parameters:
- in: query
name: name
schema:
type: string
description: Filter by name (exact match)
responses:
200:
content:
application/json:
schema: ArrayOfInstruments
400:
content:
application/json:
schema: Error
"""
if instrument_id is not None:
instrument = Instrument.get_if_accessible_by(
int(instrument_id),
self.current_user,
raise_if_none=True,
mode="read",
options=[joinedload(Instrument.fields)],
)
return self.success(data=instrument)
inst_name = self.get_query_argument("name", None)
query = Instrument.query_records_accessible_by(
self.current_user,
mode="read",
options=[
joinedload(Instrument.fields),
],
)
if inst_name is not None:
query = query.filter(Instrument.name == inst_name)
instruments = query.all()
self.verify_and_commit()
return self.success(data=instruments)
@permissions(['System admin'])
def put(self, instrument_id):
"""
---
description: Update instrument
tags:
- instruments
parameters:
- in: path
name: instrument_id
required: true
schema:
type: integer
requestBody:
content:
application/json:
schema: InstrumentNoID
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
data = self.get_json()
data['id'] = int(instrument_id)
# permission check
_ = Instrument.get_if_accessible_by(
int(instrument_id), self.current_user, raise_if_none=True, mode='update'
)
schema = Instrument.__schema__()
try:
schema.load(data, partial=True)
except ValidationError as exc:
return self.error(
'Invalid/missing parameters: ' f'{exc.normalized_messages()}'
)
self.verify_and_commit()
return self.success()
@permissions(['System admin'])
def delete(self, instrument_id):
"""
---
description: Delete an instrument
tags:
- instruments
parameters:
- in: path
name: instrument_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
instrument = Instrument.get_if_accessible_by(
int(instrument_id), self.current_user, raise_if_none=True, mode='update'
)
DBSession().delete(instrument)
self.verify_and_commit()
return self.success()
InstrumentHandler.post.__doc__ = f"""
---
description: Add a new instrument
tags:
- instruments
requestBody:
content:
application/json:
schema:
allOf:
- $ref: "#/components/schemas/InstrumentNoID"
- type: object
properties:
filters:
type: array
items:
type: string
enum: {list(ALLOWED_BANDPASSES)}
description: >-
List of filters on the instrument. If the instrument
has no filters (e.g., because it is a spectrograph),
leave blank or pass the empty list.
default: []
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: integer
description: New instrument ID
400:
content:
application/json:
schema: Error
"""
def add_tiles(instrument_id, instrument_name, regions, field_data):
session = Session()
try:
# Loop over the telescope tiles and create fields for each
skyoffset_frames = coordinates.SkyCoord(
field_data['RA'], field_data['Dec'], unit=u.deg
).skyoffset_frame()
ra = np.array([reg.vertices.ra for reg in regions])
dec = np.array([reg.vertices.dec for reg in regions])
coords = np.stack([ra, dec])
# Copy the tile coordinates such that there is one per field
# in the grid
coords_icrs = coordinates.SkyCoord(
*np.tile(coords[:, np.newaxis, ...], (len(field_data['RA']), 1, 1)),
unit=u.deg,
frame=skyoffset_frames[:, np.newaxis, np.newaxis],
).transform_to(coordinates.ICRS)
for ii, (field_id, ra, dec, coords) in enumerate(
zip(field_data['ID'], field_data['RA'], field_data['Dec'], coords_icrs)
):
contour = {
'properties': {
'instrument': instrument_name,
'field_id': int(field_id),
'ra': ra,
'dec': dec,
},
}
field = InstrumentField(
instrument_id=instrument_id, field_id=int(field_id), contour=contour
)
session.add(field)
session.commit()
tiles = []
for coord in coords:
for hpx in Tile.tiles_from_polygon_skycoord(coord):
tiles.append(
InstrumentFieldTile(
instrument_id=instrument_id,
instrument_field_id=field.id,
healpix=hpx,
)
)
session.add_all(tiles)
session.commit()
return log(f"Successfully generated fields for instrument {instrument_id}")
except Exception as e:
return log(f"Unable to generate fields for instrument {instrument_id}: {e}")
finally:
Session.remove() | en | 0.639425 | # See bottom of this file for redoc docstring -- moved it there so that # it could be made an f-string. # run async --- single: description: Retrieve an instrument tags: - instruments parameters: - in: path name: instrument_id required: true schema: type: integer responses: 200: content: application/json: schema: SingleInstrument 400: content: application/json: schema: Error multiple: description: Retrieve all instruments tags: - instruments parameters: - in: query name: name schema: type: string description: Filter by name (exact match) responses: 200: content: application/json: schema: ArrayOfInstruments 400: content: application/json: schema: Error --- description: Update instrument tags: - instruments parameters: - in: path name: instrument_id required: true schema: type: integer requestBody: content: application/json: schema: InstrumentNoID responses: 200: content: application/json: schema: Success 400: content: application/json: schema: Error # permission check --- description: Delete an instrument tags: - instruments parameters: - in: path name: instrument_id required: true schema: type: integer responses: 200: content: application/json: schema: Success 400: content: application/json: schema: Error --- description: Add a new instrument tags: - instruments requestBody: content: application/json: schema: allOf: - $ref: "#/components/schemas/InstrumentNoID" - type: object properties: filters: type: array items: type: string enum: {list(ALLOWED_BANDPASSES)} description: >- List of filters on the instrument. If the instrument has no filters (e.g., because it is a spectrograph), leave blank or pass the empty list. default: [] responses: 200: content: application/json: schema: allOf: - $ref: '#/components/schemas/Success' - type: object properties: data: type: object properties: id: type: integer description: New instrument ID 400: content: application/json: schema: Error # Loop over the telescope tiles and create fields for each # Copy the tile coordinates such that there is one per field # in the grid | 2.109957 | 2 |
sdk/python/pulumi_aws_native/kinesisvideo/get_signaling_channel.py | pulumi/pulumi-aws-native | 29 | 6630081 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetSignalingChannelResult',
'AwaitableGetSignalingChannelResult',
'get_signaling_channel',
'get_signaling_channel_output',
]
@pulumi.output_type
class GetSignalingChannelResult:
def __init__(__self__, arn=None, message_ttl_seconds=None, tags=None, type=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if message_ttl_seconds and not isinstance(message_ttl_seconds, int):
raise TypeError("Expected argument 'message_ttl_seconds' to be a int")
pulumi.set(__self__, "message_ttl_seconds", message_ttl_seconds)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the Kinesis Video Signaling Channel.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="messageTtlSeconds")
def message_ttl_seconds(self) -> Optional[int]:
"""
The period of time a signaling channel retains undelivered messages before they are discarded.
"""
return pulumi.get(self, "message_ttl_seconds")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.SignalingChannelTag']]:
"""
An array of key-value pairs to apply to this resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> Optional['SignalingChannelType']:
"""
The type of the Kinesis Video Signaling Channel to create. Currently, SINGLE_MASTER is the only supported channel type.
"""
return pulumi.get(self, "type")
class AwaitableGetSignalingChannelResult(GetSignalingChannelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSignalingChannelResult(
arn=self.arn,
message_ttl_seconds=self.message_ttl_seconds,
tags=self.tags,
type=self.type)
def get_signaling_channel(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSignalingChannelResult:
"""
Resource Type Definition for AWS::KinesisVideo::SignalingChannel
:param str name: The name of the Kinesis Video Signaling Channel.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:kinesisvideo:getSignalingChannel', __args__, opts=opts, typ=GetSignalingChannelResult).value
return AwaitableGetSignalingChannelResult(
arn=__ret__.arn,
message_ttl_seconds=__ret__.message_ttl_seconds,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_signaling_channel)
def get_signaling_channel_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSignalingChannelResult]:
"""
Resource Type Definition for AWS::KinesisVideo::SignalingChannel
:param str name: The name of the Kinesis Video Signaling Channel.
"""
...
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetSignalingChannelResult',
'AwaitableGetSignalingChannelResult',
'get_signaling_channel',
'get_signaling_channel_output',
]
@pulumi.output_type
class GetSignalingChannelResult:
def __init__(__self__, arn=None, message_ttl_seconds=None, tags=None, type=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if message_ttl_seconds and not isinstance(message_ttl_seconds, int):
raise TypeError("Expected argument 'message_ttl_seconds' to be a int")
pulumi.set(__self__, "message_ttl_seconds", message_ttl_seconds)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the Kinesis Video Signaling Channel.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="messageTtlSeconds")
def message_ttl_seconds(self) -> Optional[int]:
"""
The period of time a signaling channel retains undelivered messages before they are discarded.
"""
return pulumi.get(self, "message_ttl_seconds")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.SignalingChannelTag']]:
"""
An array of key-value pairs to apply to this resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> Optional['SignalingChannelType']:
"""
The type of the Kinesis Video Signaling Channel to create. Currently, SINGLE_MASTER is the only supported channel type.
"""
return pulumi.get(self, "type")
class AwaitableGetSignalingChannelResult(GetSignalingChannelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSignalingChannelResult(
arn=self.arn,
message_ttl_seconds=self.message_ttl_seconds,
tags=self.tags,
type=self.type)
def get_signaling_channel(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSignalingChannelResult:
"""
Resource Type Definition for AWS::KinesisVideo::SignalingChannel
:param str name: The name of the Kinesis Video Signaling Channel.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:kinesisvideo:getSignalingChannel', __args__, opts=opts, typ=GetSignalingChannelResult).value
return AwaitableGetSignalingChannelResult(
arn=__ret__.arn,
message_ttl_seconds=__ret__.message_ttl_seconds,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_signaling_channel)
def get_signaling_channel_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSignalingChannelResult]:
"""
Resource Type Definition for AWS::KinesisVideo::SignalingChannel
:param str name: The name of the Kinesis Video Signaling Channel.
"""
...
| en | 0.787933 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The Amazon Resource Name (ARN) of the Kinesis Video Signaling Channel. The period of time a signaling channel retains undelivered messages before they are discarded. An array of key-value pairs to apply to this resource. The type of the Kinesis Video Signaling Channel to create. Currently, SINGLE_MASTER is the only supported channel type. # pylint: disable=using-constant-test Resource Type Definition for AWS::KinesisVideo::SignalingChannel :param str name: The name of the Kinesis Video Signaling Channel. Resource Type Definition for AWS::KinesisVideo::SignalingChannel :param str name: The name of the Kinesis Video Signaling Channel. | 1.980823 | 2 |
bare_python/s16_2_contextlib_abstractcontextmanager.py | AndreiHondrari/python_exploration | 3 | 6630082 | <filename>bare_python/s16_2_contextlib_abstractcontextmanager.py<gh_stars>1-10
#!python3
from typing import Any
from contextlib import AbstractContextManager as ACM
from ut import p
class A(ACM):
def __init__(self) -> None:
self.x = 10
def __exit__(self, *args: Any) -> None:
# needs to be implemented because it's abstract in ACM
pass
p("ACM normal")
with A() as a:
print(vars(a))
| <filename>bare_python/s16_2_contextlib_abstractcontextmanager.py<gh_stars>1-10
#!python3
from typing import Any
from contextlib import AbstractContextManager as ACM
from ut import p
class A(ACM):
def __init__(self) -> None:
self.x = 10
def __exit__(self, *args: Any) -> None:
# needs to be implemented because it's abstract in ACM
pass
p("ACM normal")
with A() as a:
print(vars(a))
| en | 0.944407 | #!python3 # needs to be implemented because it's abstract in ACM | 2.878039 | 3 |
odrive/Firmware/fibre/python/fibre/tcp_transport.py | kirmani/doggo | 0 | 6630083 |
import sys
import socket
import time
import traceback
import fibre.protocol
from fibre.utils import wait_any
def noprint(x):
pass
class TCPTransport(fibre.protocol.StreamSource, fibre.protocol.StreamSink):
def __init__(self, dest_addr, dest_port, logger):
# TODO: FIXME: use IPv6
# Problem: getaddrinfo fails if the resolver returns an
# IPv4 address, but we are using AF_INET6
#family = socket.AF_INET6 if socket.has_ipv6 else socket.AF_INET
family = socket.AF_INET
self.sock = socket.socket(family, socket.SOCK_STREAM)
# TODO: Determine the right address to use from the list
self.target = socket.getaddrinfo(dest_addr, dest_port, family)[0][4]
# TODO: this blocks until a connection is established, or the system cancels it
self.sock.connect(self.target)
def process_bytes(self, buffer):
self.sock.send(buffer)
def get_bytes(self, n_bytes, deadline):
"""
Returns n bytes unless the deadline is reached, in which case the bytes
that were read up to that point are returned. If deadline is None the
function blocks forever. A deadline before the current time corresponds
to non-blocking mode.
"""
# convert deadline to seconds (floating point)
deadline = None if deadline is None else max(deadline - time.monotonic(), 0)
self.sock.settimeout(deadline)
try:
data = self.sock.recv(n_bytes, socket.MSG_WAITALL) # receive n_bytes
return data
except socket.timeout:
# if we got a timeout data will still be none, so we call recv again
# this time in non blocking state and see if we can get some data
try:
return self.sock.recv(n_bytes, socket.MSG_DONTWAIT)
except socket.timeout:
raise TimeoutError
def get_bytes_or_fail(self, n_bytes, deadline):
result = self.get_bytes(n_bytes, deadline)
if len(result) < n_bytes:
raise TimeoutError("expected {} bytes but got only {}".format(n_bytes, len(result)))
return result
def discover_channels(path, serial_number, callback, cancellation_token, channel_termination_token, logger):
"""
Tries to connect to a TCP server based on the path spec.
This function blocks until cancellation_token is set.
Channels spawned by this function run until channel_termination_token is set.
"""
try:
dest_addr = ':'.join(path.split(":")[:-1])
dest_port = int(path.split(":")[-1])
except (ValueError, IndexError):
raise Exception('"{}" is not a valid TCP destination. The format should be something like "localhost:1234".'
.format(path))
while not cancellation_token.is_set():
try:
tcp_transport = fibre.tcp_transport.TCPTransport(dest_addr, dest_port, logger)
stream2packet_input = fibre.protocol.PacketFromStreamConverter(tcp_transport)
packet2stream_output = fibre.protocol.StreamBasedPacketSink(tcp_transport)
channel = fibre.protocol.Channel(
"TCP device {}:{}".format(dest_addr, dest_port),
stream2packet_input, packet2stream_output,
channel_termination_token, logger)
except:
#logger.debug("TCP channel init failed. More info: " + traceback.format_exc())
pass
else:
callback(channel)
wait_any(None, cancellation_token, channel._channel_broken)
time.sleep(1)
|
import sys
import socket
import time
import traceback
import fibre.protocol
from fibre.utils import wait_any
def noprint(x):
pass
class TCPTransport(fibre.protocol.StreamSource, fibre.protocol.StreamSink):
def __init__(self, dest_addr, dest_port, logger):
# TODO: FIXME: use IPv6
# Problem: getaddrinfo fails if the resolver returns an
# IPv4 address, but we are using AF_INET6
#family = socket.AF_INET6 if socket.has_ipv6 else socket.AF_INET
family = socket.AF_INET
self.sock = socket.socket(family, socket.SOCK_STREAM)
# TODO: Determine the right address to use from the list
self.target = socket.getaddrinfo(dest_addr, dest_port, family)[0][4]
# TODO: this blocks until a connection is established, or the system cancels it
self.sock.connect(self.target)
def process_bytes(self, buffer):
self.sock.send(buffer)
def get_bytes(self, n_bytes, deadline):
"""
Returns n bytes unless the deadline is reached, in which case the bytes
that were read up to that point are returned. If deadline is None the
function blocks forever. A deadline before the current time corresponds
to non-blocking mode.
"""
# convert deadline to seconds (floating point)
deadline = None if deadline is None else max(deadline - time.monotonic(), 0)
self.sock.settimeout(deadline)
try:
data = self.sock.recv(n_bytes, socket.MSG_WAITALL) # receive n_bytes
return data
except socket.timeout:
# if we got a timeout data will still be none, so we call recv again
# this time in non blocking state and see if we can get some data
try:
return self.sock.recv(n_bytes, socket.MSG_DONTWAIT)
except socket.timeout:
raise TimeoutError
def get_bytes_or_fail(self, n_bytes, deadline):
result = self.get_bytes(n_bytes, deadline)
if len(result) < n_bytes:
raise TimeoutError("expected {} bytes but got only {}".format(n_bytes, len(result)))
return result
def discover_channels(path, serial_number, callback, cancellation_token, channel_termination_token, logger):
"""
Tries to connect to a TCP server based on the path spec.
This function blocks until cancellation_token is set.
Channels spawned by this function run until channel_termination_token is set.
"""
try:
dest_addr = ':'.join(path.split(":")[:-1])
dest_port = int(path.split(":")[-1])
except (ValueError, IndexError):
raise Exception('"{}" is not a valid TCP destination. The format should be something like "localhost:1234".'
.format(path))
while not cancellation_token.is_set():
try:
tcp_transport = fibre.tcp_transport.TCPTransport(dest_addr, dest_port, logger)
stream2packet_input = fibre.protocol.PacketFromStreamConverter(tcp_transport)
packet2stream_output = fibre.protocol.StreamBasedPacketSink(tcp_transport)
channel = fibre.protocol.Channel(
"TCP device {}:{}".format(dest_addr, dest_port),
stream2packet_input, packet2stream_output,
channel_termination_token, logger)
except:
#logger.debug("TCP channel init failed. More info: " + traceback.format_exc())
pass
else:
callback(channel)
wait_any(None, cancellation_token, channel._channel_broken)
time.sleep(1)
| en | 0.820568 | # TODO: FIXME: use IPv6 # Problem: getaddrinfo fails if the resolver returns an # IPv4 address, but we are using AF_INET6 #family = socket.AF_INET6 if socket.has_ipv6 else socket.AF_INET # TODO: Determine the right address to use from the list # TODO: this blocks until a connection is established, or the system cancels it Returns n bytes unless the deadline is reached, in which case the bytes that were read up to that point are returned. If deadline is None the function blocks forever. A deadline before the current time corresponds to non-blocking mode. # convert deadline to seconds (floating point) # receive n_bytes # if we got a timeout data will still be none, so we call recv again # this time in non blocking state and see if we can get some data Tries to connect to a TCP server based on the path spec. This function blocks until cancellation_token is set. Channels spawned by this function run until channel_termination_token is set. #logger.debug("TCP channel init failed. More info: " + traceback.format_exc()) | 2.790787 | 3 |
compute/validate_object_size.py | theri/web-measurement-tools | 11 | 6630084 | #!/usr/bin/env python3
#
# Author: <NAME> (<EMAIL>)
# 2018
# This script computes object sizes from a packet capture trace
# and compares them to HAR and Resource Timings
import os
import sys
import glob
import subprocess
import logging
import csv
import re
import datetime
import computetimings
RUNDIR="../testdata/"
CAPTURE_FILE_NAME = r"local\:any.pcap"  # raw string so the backslash before ':' is kept literally
# For debugging
#ADDITIONAL_TSHARK_FILTER = " \"frame.number >= 0 and frame.number <= 1000\" "
ADDITIONAL_TSHARK_FILTER = ""
URI_TO_DEBUG=""
#URI_TO_DEBUG = "/c_fill,w_90,h_60,g_faces,q_70/images/20180918/2d02caf9d1a043f38ce843951318e2fa.jpeg"
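# URI path to single out for extra debugging (presumably used by the matching/debug
# output below); leave empty to disable.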
# From the list of HAR timings, as read from the log file, get the one that matches this URI and timestamp
def get_matching_hartiming(hartimings, uri_to_look_for, timestamp_to_look_for, statuscode_to_look_for="", use_starttime=False, match_closest=False):
if hartimings is None or len(hartimings) == 0:
return None
logging.debug("Looking for HAR timings for " + uri_to_look_for)
candidates = []
for hart in hartimings:
har_uri = hart["name"]
if har_uri == uri_to_look_for and (hart["status"] == statuscode_to_look_for or statuscode_to_look_for == ""):
logging.debug("HAR: " + str(hart))
startedDateTime = datetime.datetime.strptime(hart["startedDateTime"], "%Y-%m-%d+%H-%M-%S.%f")
if not use_starttime:
pre_send_duration = datetime.timedelta(milliseconds = 0)
try:
# See if this HAR timing is within timing range +- 1 ms (due to rounding)
pre_send_duration = datetime.timedelta(milliseconds=computetimings.sum_timings([hart["blockedTime"], hart["dnsTime"], hart["connectTime"], hart["sslTime"]]) - 1)
post_send_duration = datetime.timedelta(milliseconds=computetimings.sum_timings([hart["blockedTime"], hart["dnsTime"], hart["connectTime"], hart["sslTime"], hart["sendTime"]]) + 1)
except ValueError:
# No timings available - not taking this object
logging.debug("No timings for " + str(hart))
continue
pre_send_time = startedDateTime + pre_send_duration
post_send_time = startedDateTime + post_send_duration
else:
pre_send_time = startedDateTime - datetime.timedelta(milliseconds = 1)
post_send_time = startedDateTime + datetime.timedelta(milliseconds = computetimings.sum_timings([hart["dnsTime"], hart["connectTime"], hart["sslTime"], hart["sendTime"], hart["waitTime"], hart["receiveTime"]]))
logging.debug("\tchecking if timestamp_to_look_for " + str(timestamp_to_look_for) + " is between " + str(pre_send_time) + " and " + str(post_send_time))
# Return the first HAR timing that falls into our timing range
if timestamp_to_look_for >= pre_send_time and timestamp_to_look_for <= post_send_time:
hart["timediff"] = 0
candidates.append(hart)
logging.debug("\t\tYes!\n")
else:
hart["timediff"] = min(abs((timestamp_to_look_for - pre_send_time).total_seconds()), abs((post_send_time - timestamp_to_look_for).total_seconds()))
candidates.append(hart)
logging.debug("\tTimediff " + str(hart["timediff"]))
if len(candidates) == 1:
return candidates[0]
elif len(candidates) > 0 and match_closest:
mincandidate = candidates[0]
logging.debug("candidate with " + str(mincandidate["timediff"]))
for candidate in candidates:
if candidate["timediff"] < mincandidate["timediff"]:
mincandidate = candidate
logging.debug("new min candidate with " + str(mincandidate["timediff"]))
return mincandidate
else:
logging.debug("\tFound none or too many!")
return None
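# Worked example of the matching window above (all values in ms): with
# startedDateTime at 12:00:00.000, blockedTime=5, dnsTime=10, connectTime=20,
# sslTime=0 and sendTime=2, a packet timestamp is accepted if it falls between
# 12:00:00.034 (5+10+20+0 minus 1 ms rounding slack) and 12:00:00.038 (plus
# sendTime and 1 ms slack); outside that window only the "closest" fallback applies.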
def get_matching_navtiming(navtimings, page_to_look_for, timestamp_to_look_for):
if navtimings is None:
return None
for navt in navtimings:
if navt["page"] == page_to_look_for and navt["starttime"] == timestamp_to_look_for:
return navt
return None
# From a list of resources, give back the one expecting this tcp sequence number
def get_resource_for_packet(resourcelist, tcpseq):
for resource in resourcelist:
if resource["tcp.seq_to_expect"] == tcpseq:
return resource
return None
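# How the "tcp.seq_to_expect" bookkeeping is used: when a request is parsed, its
# tcp.ack is the sequence number at which the server's response will begin; each
# subsequent server packet advances the expected value by its tcp.len, and -1
# marks a resource whose byte accounting can no longer be trusted.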
# From list of resource timings as read from log, get the one matching this URI and timestamp
def get_matching_restiming(restimings, uri_to_look_for, timestamp_to_look_for, page_startedDateTime, match_closest=False):
if restimings is None:
return None
candidates = []
for rest in restimings:
res_uri = rest["name"]
if res_uri == uri_to_look_for:
startedDateTime = page_startedDateTime + datetime.timedelta(milliseconds = float(rest["starttime"]))
duration = datetime.timedelta(milliseconds=float(rest["duration"]))
endTime = startedDateTime + duration
logging.debug("Is " + str(timestamp_to_look_for) + " between " + str(startedDateTime) + " and " + str(endTime) + "?")
if not match_closest:
if timestamp_to_look_for >= startedDateTime and timestamp_to_look_for <= endTime:
return rest
else:
if timestamp_to_look_for >= startedDateTime and timestamp_to_look_for <= endTime:
timediff = 0
else:
timediff = min(abs((timestamp_to_look_for - startedDateTime).total_seconds()), abs((endTime - timestamp_to_look_for).total_seconds()))
rest["timediff"] = timediff
candidates.append(rest)
if not match_closest or len(candidates) < 1:
return None
else:
mincandidate = candidates[0]
logging.debug("candidate with " + str(mincandidate["timediff"]))
for candidate in candidates:
if candidate["timediff"] < mincandidate["timediff"]:
mincandidate = candidate
logging.debug("new min candidate with " + str(mincandidate["timediff"]))
return mincandidate
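# Unlike HAR entries, resource timings are millisecond offsets from the page's
# navigationStart, which is why the caller passes page_startedDateTime to put
# them on the same absolute timeline as the packet timestamps.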
def log_validation(run, log=True):
HTTP_PCAP_FILE = run + "pcap/http_and_not_ssl.pcap"
print("Logging validation object sizes for " + HTTP_PCAP_FILE)
if not os.path.exists(HTTP_PCAP_FILE):
print("Filtering pcap for only http traffic, this may take a while...")
subprocess.run("tshark -r " + run + "pcap/" + CAPTURE_FILE_NAME + " -w " + HTTP_PCAP_FILE + " -Y \"(tcp.srcport == 80 or tcp.dstport == 80 and not ssl) and tcp.len > 0\"", shell=True)
csv.register_dialect('sepbyhash', delimiter='#')
process_headers = subprocess.run("tshark -r " + HTTP_PCAP_FILE + (" -Y" + ADDITIONAL_TSHARK_FILTER if ADDITIONAL_TSHARK_FILTER else "") + " -T fields -E separator=# -e frame.time_epoch -e tcp.stream -e tcp.srcport -e tcp.seq -e tcp.ack -e http.host -e http.request.uri -e http.response.code -e tcp.len", shell=True, stdout=subprocess.PIPE, universal_newlines=True)
# Process trace once more to get raw TCP data - this only works if data has not been analyzed by HTTP dissector
process_data = subprocess.run("tshark -r " + HTTP_PCAP_FILE + (" -Y" + ADDITIONAL_TSHARK_FILTER if ADDITIONAL_TSHARK_FILTER else "") + " --disable-protocol http -T fields -e data", shell=True, stdout=subprocess.PIPE, universal_newlines=True)
headers = process_headers.stdout.splitlines()
data = process_data.stdout.splitlines()
reader = csv.DictReader(headers, dialect='sepbyhash', fieldnames=["timestamp", "tcp.stream", "tcp.srcport", "tcp.seq", "tcp.ack", "http.host", "http.request.uri", "http.response.code", "tcp.len"])
packetlist = list(reader)
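    # Both tshark passes read the same capture with the same display filter, so
    # row i of `data` (raw hex payload, HTTP dissector disabled) corresponds to
    # row i of `packetlist` (parsed header fields); the loop below relies on that.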
tcpstreams = {}
tcpstream_to_debug = ""
for (index, packet) in enumerate(packetlist):
tcpstream = packet["tcp.stream"]
if tcpstream == tcpstream_to_debug:
print("Packet in tcpstream_to_debug " + str(tcpstream_to_debug) + ": " + str(packet))
# If this packet contains an HTTP request URI as parsed by tshark:
# Create an entry for the new resource and add it to this tcpstream's dict
if packet["http.request.uri"]:
uri = packet["http.host"] + packet["http.request.uri"]
newresource = { "host" : packet["http.host"], "uri" : packet["http.request.uri"], "requesttimestamp": packet["timestamp"], "response": None, "tcp.seq_to_expect": int(packet["tcp.ack"])}
# Is there a pending HTTP transfer (that is expecting data on this tcp.seq)? Invalidate it.
resource = None
try:
resource = get_resource_for_packet(tcpstreams[tcpstream], int(packet["tcp.ack"]))
except KeyError:
logging.debug("No resources yet -- everything is fine")
if resource:
logging.debug("Already expecting a non-finished resource here: " + str(resource["uri"]) + " -- invalidating")
resource["tcp.seq_to_expect"] = -1
try:
tcpstreams[tcpstream].append(newresource)
except KeyError:
tcpstreams[tcpstream] = [newresource]
logging.debug("\tLogged request for " + uri + " - awaiting reply at tcp.seq " + str(packet["tcp.ack"]))
#if URI_TO_DEBUG == uri:
# #print("Request: " + str(packet) + " - logged: " + str(tcpstreams[tcpstream][-1]))
# tcpstream_to_debug = tcpstream
else:
# Not an HTTP request - see if we already have HTTP requests on this tcpstream
# and if so, try to get an HTTP request expecting this packet's sequence number
try:
resource = get_resource_for_packet(tcpstreams[tcpstream], int(packet["tcp.seq"]))
if not resource:
logging.debug("Could not get resource expecting this tcp.seq " + packet["tcp.seq"] + " -- not using it")
continue
except KeyError as err:
# Did not find an HTTP request logged for this tcpstream
logging.debug("Got KeyError " + str(err) + " -- continuing")
continue
# We got a resource -- analyze how this packet relates to it
resource["tcp.seq_to_expect"] += int(packet["tcp.len"])
logging.debug("Got a resource in tcpstream " + str(tcpstream) + " at tcp.seq " + packet["tcp.seq"] + ": " + resource["host"] + resource["uri"])
# If this packet contains an HTTP response code as parsed by tshark:
# Look if the last entry of the resources for this tcpstream matches.
# Matching means that the requests's tcp.ack matches this packet's tcp_seq,
# so we can assume that this is a response to that request
if packet["http.response.code"]:
tcpdata = data[index]
if len(tcpdata) / 2 != int(packet["tcp.len"]):
# length of our tcpdata does not match tcp.len header field -- invalidating this resource
logging.debug("Data length " + str(int(len(tcpdata) / 2)) + " does not match tcp.len " + packet["tcp.len"])
resource["tcp.seq_to_expect"] = -1
continue
resource["status"] = packet["http.response.code"]
uri = resource["host"] + resource["uri"]
if URI_TO_DEBUG == uri:
print("Computing stuff for " + uri + ": ")
resource["response"] = True
# Calculate length of header and body by splitting raw data on \r\n\r\n
resource["tcplen"] = packet["tcp.len"]
if "startofresponse" in resource.keys():
tcpdata = resource["startofresponse"] + data[index]
else:
tcpdata = data[index]
if "0d0a0d0a" in tcpdata:
header, body = tcpdata.split("0d0a0d0a", 1)
# Split raw TCP data between HTTP header and body based on "0d0a0d0a", then count bytes
# Every byte got logged as two ascii characters - have to add 0d0a0d0a for headers again
resource["headerlen"] = int(len(header) / 2 + 4)
resource["bodylen"] = int(len(body) / 2)
else:
logging.debug("Found no 0x0d0a0d0a for " + uri + " in " + str(len(tcpdata)) + " bytes")
if "0a0a" in tcpdata:
logging.debug("This might be one of those rare cases with LFLF insteaf of CRLFCRLF... that is not standards compliant to HTTP/1.1. I can't take that.")
continue
else:
# No delimiter - assume it's all headers
resource["headerlen"] = int(len(tcpdata) / 2)
resource["bodylen"] = 0
logging.debug("\tComputed resource header length " + str(resource["headerlen"]) + " and body length " + str(resource["bodylen"]) + " for " + uri)
# Do not expect a tcp.seq anymore
resource["tcp.seq_to_expect"] = -1
if uri == URI_TO_DEBUG:
logging.debug("Added packet to end of list " + str(tcpstreams[tcpstream]))
# This packet contains neither an http.request.uri nor an http.response.code
# but it might be a continuation of a previous response
# or it might actually be an HTTP response, just not decoded by tshark
else:
tcpdata = data[index]
if len(tcpdata) / 2 != int(packet["tcp.len"]):
# length of our tcpdata does not match tcp.len header field -- invalidating this resource
logging.debug("Data length " + str(int(len(tcpdata) / 2)) + " does not match tcp.len " + packet["tcp.len"])
resource["tcp.seq_to_expect"] = -1
continue
# No HTTP request and no HTTP response code but TCP stream continues
# --> Might be HTTP continuation of a previous response
if resource["response"]:
resource["bodylen"] += len(tcpdata)
else:
# Is this actually an HTTP response, but tshark was just too stupid to dissect it?
if tcpdata[:18] == "485454502f312e3120" or tcpdata[:18] == "485454502f312e3020":
# Start of data says "HTTP/1.1" or 1.0 ... this is a response. Store data for later analysis
tcpstreams[tcpstream][-1]["startofresponse"] = tcpdata
logging.debug("This is the start of a not-yet-parsed HTTP response... storing " + str(int(len(tcpdata)/2)) + " bytes")
elif "startofresponse" in tcpstreams[tcpstream][-1].keys():
# We have a start of a response, but did not actually parse the complete response yet
# -- add this to startofresponse
tcpstreams[tcpstream][-1]["startofresponse"] += tcpdata
logging.debug("This is the continuation of a not-yet-parsed HTTP response... storing " + str(int(len(tcpdata)/2)) + " bytes")
else:
# Got something, but not the start of an HTTP reply... invalidating this resource
resource["tcp.seq_to_expect"] = -1
logfilename = run + "object_sizes_trace.log"
if log:
try:
if os.path.exists(logfilename):
os.remove(logfilename)
print("Deleted old " + logfilename)
except Exception as err:
print("Could not delete " + logfilename + ": " + str(err))
try:
csvfile = open(logfilename, "w", newline='')
except TypeError as e:
print("Error opening " + logfilename + ": " + str(e))
csvfile = open(logfilename, 'wb')
csvwriter = csv.writer(csvfile, delimiter=",")
starttimings = computetimings.read_starttimings(run)
navtimings = computetimings.read_navtimings(run)
hartimings = {}
restimings = {}
resources_per_page_load = {}
max_tcpstream = max([ int(streamid) for streamid in tcpstreams.keys() ])
logging.debug("Max tcpstream: " + str(max_tcpstream))
# Go through TCP streams, match them to page loads (pagelabel) based on timestamps
    for i in range(0, max_tcpstream + 1):  # +1 so the highest-numbered stream is not skipped
tcpstream = str(i)
try:
resources = tcpstreams[tcpstream]
except KeyError:
logging.debug("No resources for TCP stream " + str(tcpstream))
continue
# Find out which page load the first resource belongs to
requesttimestamp = datetime.datetime.fromtimestamp(float(resources[0]["requesttimestamp"]))
(pageurl, starttime) = computetimings.find_first_url_in_starttimings(starttimings, requesttimestamp)
if not pageurl:
# Did not find which page load this belongs to - cannot do anything
continue
else:
logging.debug("Found page url " + pageurl)
starttimestamp = starttime.replace(" ", "+").replace(":", "-")
pagelabel = pageurl.replace("http://", "") + "+" + starttimestamp
try:
resources_per_page_load[pagelabel].extend(resources)
except KeyError:
resources_per_page_load[pagelabel] = resources
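    # `pagelabel` pairs the page URL (scheme stripped) with the sanitized start
    # timestamp, e.g. "example.com+2018-09-18+12-00-00" for a hypothetical load
    # of http://example.com started at that time.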
for (pagelabel, resources) in resources_per_page_load.items():
print("Page load: " + pagelabel)
# Sort resources in this page load by requesttimestamp, then match them to HAR and resource timings
for r in sorted(resources, key=lambda k: float(k["requesttimestamp"])):
try:
uri = "http://" + r["host"] + r["uri"]
requesttimestamp = datetime.datetime.fromtimestamp(float(r["requesttimestamp"]))
bodylen = r["bodylen"]
except KeyError:
logging.info("\t\t" + "no reply for " + str(r["uri"]))
continue
(pageurl, starttimestamp) = pagelabel.split("+", 1)
pageurl = "http://" + pageurl
logging.debug("URI: " + uri)
navt = get_matching_navtiming(navtimings, pageurl, starttimestamp)
if navt is None:
print("Did not get navtiming for " + pageurl + "+" + str(starttimestamp))
continue
#print("Resource: " + str(r))
# Get a HAR timing matching this specific resource from the HAR timings
try:
hart = get_matching_hartiming(hartimings[pagelabel], uri, requesttimestamp, r["status"])
except KeyError:
hartimings[pagelabel] = computetimings.get_hartimings(run, pagelabel, navt)
hart = get_matching_hartiming(hartimings[pagelabel], uri, requesttimestamp, r["status"])
if not hart:
har_headerlen = "NA"
har_bodylen = "NA"
har_contentlengthheader = "NA"
har_transfersize = "NA"
logging.debug("No HAR timing :(")
else:
har_headerlen = hart["respheadersize"]
har_bodylen = hart["respbodysize"]
har_contentlengthheader = hart["contentlengthheader"]
har_transfersize = hart["resptransfersize"]
# remove the found HAR timing from the list - don't want to match it twice
hartimings[pagelabel].remove(hart)
if not navt:
# No nav timing for this page -- cannot match a resource timing!
rest = None
else:
# Get a resource timing matching this specific resource
try:
rest = get_matching_restiming(restimings[pagelabel], uri, requesttimestamp, datetime.datetime.fromtimestamp(float(navt["navigationStart"])))
except KeyError:
restimings[pagelabel] = computetimings.get_restimings(run, pagelabel)
rest = get_matching_restiming(restimings[pagelabel], uri, requesttimestamp, datetime.datetime.fromtimestamp(float(navt["navigationStart"])))
if not rest:
res_bodylen = "NA"
else:
res_bodylen = rest["encodedBodySize"]
# Remove the found resource timing from list - don't want to match it twice
restimings[pagelabel].remove(rest)
# For this resource, log all header and body sizes from trace, HAR, and resource timings
if ADDITIONAL_TSHARK_FILTER:
if r["uri"] in URI_TO_DEBUG:
print("\t\t" + r["status"] + " " + r["host"] + r["uri"] + "\n\t\t" + str(r["headerlen"]) + " + " + str(r["bodylen"]) + " = " + str(r["tcplen"]) + " bytes (HTTP headers + body)")
if log:
csvwriter.writerow([pageurl, starttimestamp, r["requesttimestamp"], uri, r["status"], r["tcplen"], r["headerlen"], r["bodylen"], har_transfersize, har_headerlen, har_bodylen, har_contentlengthheader, res_bodylen])
if log:
csvfile.close()
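# Columns written to object_sizes_trace.log, in order: page URL, page start
# timestamp, request timestamp, object URI, HTTP status, TCP payload length,
# header and body length measured from the trace, HAR transfer size, HAR header
# size, HAR body size, the HAR Content-Length header, and the resource timing
# encodedBodySize.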
def main(argv=[]):
log = True
if ADDITIONAL_TSHARK_FILTER:
log = False
runs = glob.glob(RUNDIR + "run-*")
if (len(argv) > 1):
runfilter = argv[1]
runs = [ r for r in runs if runfilter in r ]
print("Running for " + str(runs))
for run in runs:
if run[-1] != "/":
run = run + "/"
log_validation(run, log)
if __name__ == "__main__":
main(sys.argv)
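# Example invocation (the run name is hypothetical):
#   python3 validate_object_size.py run-2018-09-18
# The optional argument is a substring filter on run directories; without it,
# every "run-*" directory under RUNDIR is processed.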
| en | 0.829529 | #!/usr/bin/env python3 # # Author: <NAME> (<EMAIL>) # 2018 # This script computes object sizes from a packet capture trace # and compare them to HAR and Resource Timings # For debugging #ADDITIONAL_TSHARK_FILTER = " \"frame.number >= 0 and frame.number <= 1000\" " #URI_TO_DEBUG = "/c_fill,w_90,h_60,g_faces,q_70/images/20180918/2d02caf9d1a043f38ce843951318e2fa.jpeg" # From list of HAR timings, as read from log file, get the one which matches this URI and timestamp # See if this HAR timing is within timing range +- 1 ms (due to rounding) # No timings available - not taking this object # Return the first HAR timing that falls into our timing range # From a list of resources, give back the one expecting this tcp sequence number # From list of resource timings as read from log, get the one matching this URI and timestamp # -e frame.time_epoch -e tcp.stream -e tcp.srcport -e tcp.seq -e tcp.ack -e http.host -e http.request.uri -e http.response.code -e tcp.len", shell=True, stdout=subprocess.PIPE, universal_newlines=True) # Process trace once more to get raw TCP data - this only works if data has not been analyzed by HTTP dissector # If this packet contains an HTTP request URI as parsed by tshark: # Create an entry for the new resource and add it to this tcpstream's dict # Is there a pending HTTP transfer (that is expecting data on this tcp.seq)? Invalidate it. #if URI_TO_DEBUG == uri: # #print("Request: " + str(packet) + " - logged: " + str(tcpstreams[tcpstream][-1])) # tcpstream_to_debug = tcpstream # Not an HTTP request - see if we already have HTTP requests on this tcpstream # and if so, try to get an HTTP request expecting this packet's sequence number # Did not find an HTTP request logged for this tcpstream # We got a resource -- analyze how this packet relates to it # If this packet contains an HTTP response code as parsed by tshark: # Look if the last entry of the resources for this tcpstream matches. # Matching means that the requests's tcp.ack matches this packet's tcp_seq, # so we can assume that this is a response to that request # length of our tcpdata does not match tcp.len header field -- invalidating this resource # Calculate length of header and body by splitting raw data on \r\n\r\n # Split raw TCP data between HTTP header and body based on "0d0a0d0a", then count bytes # Every byte got logged as two ascii characters - have to add 0d0a0d0a for headers again # No delimiter - assume it's all headers # Do not expect a tcp.seq anymore # This packet contains neither an http.request.uri nor an http.response.code # but it might be a continuation of a previous response # or it might actually be an HTTP response, just not decoded by tshark # length of our tcpdata does not match tcp.len header field -- invalidating this resource # No HTTP request and no HTTP response code but TCP stream continues # --> Might be HTTP continuation of a previous response # Is this actually an HTTP response, but tshark was just too stupid to dissect it? # Start of data says "HTTP/1.1" or 1.0 ... this is a response. Store data for later analysis # We have a start of a response, but did not actually parse the complete response yet # -- add this to startofresponse # Got something, but not the start of an HTTP reply... 
invalidating this resource # Go through TCP streams, match them to page loads (pagelabel) based on timestamps # Find out which page load the first resource belongs to # Did not find which page load this belongs to - cannot do anything # Sort resources in this page load by requesttimestamp, then match them to HAR and resource timings #print("Resource: " + str(r)) # Get a HAR timing matching this specific resource from the HAR timings # remove the found HAR timing from the list - don't want to match it twice # No nav timing for this page -- cannot match a resource timing! # Get a resource timing matching this specific resource # Remove the found resource timing from list - don't want to match it twice # For this resource, log all header and body sizes from trace, HAR, and resource timings | 2.287577 | 2 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/ccxcon/migrations/0002_auto_20160325_0407.py | osoco/better-ways-of-thinking-about-software | 3 | 6630085 | <filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/ccxcon/migrations/0002_auto_20160325_0407.py<gh_stars>1-10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ccxcon', '0001_initial_ccxcon_model'),
]
operations = [
migrations.AlterModelOptions(
name='ccxcon',
options={'verbose_name': 'CCX Connector', 'verbose_name_plural': 'CCX Connectors'},
),
]
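# Note: AlterModelOptions only updates Django's in-memory model state (the
# verbose names shown in the admin); it does not emit any SQL for the ccxcon
# tables.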
| none | 1 | 1.777399 | 2 |
|
LHD_Build_2022/pi.py | Welf06/Beginnner_Projects | 1 | 6630086 | import imp
from math import pi
print("FINDING Nth DIGIT OF PI")
try:
    num = int(input("N = "))
except ValueError:
    # Exit early so `num` is never used before assignment below.
    print("Enter a valid integer!")
    raise SystemExit(1)
if num < 1 or num > 48:
    # num + 1 skips the leading "3.", so num = 1 is the first decimal digit and
    # num = 0 would return the decimal point. Note that math.pi is a double, so
    # only the first ~15 decimal digits are true digits of pi.
    print("Please enter a num between 1 and 48")
else:
print(f"{pi:.48f}"[num+1]) | import imp
from math import pi
print("FINDING Nth DIGIT OF PI")
try:
num = int(input("N = "))
except:
print("Enter a valid integer!")
if num > 48 or num < 0:
print("Please enter a num between 0 to 48")
else:
print(f"{pi:.48f}"[num+1]) | none | 1 | 4.248526 | 4 |
|
tracker.py | ppabli/awesometracker-desktop | 0 | 6630087 | from tkinter import *
from tkinter import ttk
import threading
import sys
import datetime
import script
import os
import requests
import webbrowser
class MainWindow():
def __init__(self, window, frame):
self.window = window
self.frame = frame
if window == None:
self.window = Tk()
self.window.minsize(250, 175)
self.window.iconbitmap("./media/favicon.ico")
self.window.title("AwesomeTracker")
else:
self.window.minsize(250, 175)
self.window.iconbitmap("./media/favicon.ico")
self.window.title("AwesomeTracker")
if frame == None:
self.frame = Frame(self.window, bg = "#F4F133")
self.frame.pack(fill = BOTH, expand = 1)
else:
self.frame.pack_forget()
self.frame = Frame(self.window)
self.frame.pack(fill = BOTH, expand = 1)
self.greetingsLabel = Label(self.frame, text = "Welcome to AwesomeTracker", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.greetingsLabel.pack(fill = "x")
self.versionLabel = Label(self.frame, text = "Version - V 0.1 \n\r Version notes: \n\r -Performance improvements\n\r", relief = "solid", font = ("arial", 12, "bold"))
self.versionLabel.pack(fill = BOTH, expand = 1)
self.nextButton = Button(self.frame, text = "Next ->",bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.checkCon())
self.nextButton.pack(fill = "x")
self.window.mainloop()
def checkCon(self):
hostname = "awesometracker.ddns.net"
param = '-n' if sys.platform in ['Windows', 'win32'] else '-c'
command = ['ping', param, 1, hostname]
response = os.system(' '.join(str(e) for e in command))
if response == 0:
LoginWindow(self.window, self.frame)
else:
			self.internetProblem = Label(self.frame, text = "Connection error", bg = "red", relief = "solid", font = ("arial", 12, "bold"))
self.internetProblem.pack(fill = "x")
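	# Possible alternative to shelling out to ping (sketch only, not used here):
	# with `import socket`, socket.create_connection(("awesometracker.ddns.net", 443), timeout=3)
	# would test reachability without depending on platform-specific ping flags.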
class LoginWindow():
def __init__(self, window, frame):
self.window = window
self.frame = frame
if self.window == None:
self.window = Tk()
self.window.geometry("350x200")
self.window.minsize(350, 200)
self.window.iconbitmap("media/favicon.ico")
self. window.title("AwesomeTracker")
else:
self.window.minsize(350, 200)
self.window.iconbitmap("media/favicon.ico")
self.window.title("AwesomeTracker")
if self.frame == None:
self.frame = Frame(self.window, bg = "#84e8e8", height = self.window.winfo_height(), width = self.window.winfo_width())
self.frame.pack(fill = BOTH, expand = 1)
else:
self.frame.pack_forget()
self.frame = Frame(self.window, bg = "#84e8e8", height = self.window.winfo_height(), width = self.window.winfo_width())
self.frame.pack(fill = BOTH, expand = 1)
self.frame.columnconfigure(0, weight = 1)
self.frame.columnconfigure(1, weight = 1)
self.frame.rowconfigure(0, weight = 1)
self.frame.rowconfigure(1, weight = 1)
self.frame.rowconfigure(2, weight = 1)
self.frame.rowconfigure(3, weight = 1)
self.frame.rowconfigure(4, weight = 0)
self.frame.rowconfigure(5, weight=0)
self.frame.rowconfigure(6, weight = 1)
self.greetingsLabel = Label(self.frame, text = "Log in", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.greetingsLabel.grid(row = 0, column = 0, columnspan = 2, sticky = N + S + E + W)
self.userLabel = Label(self.frame, text = "Email or user", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.userLabel.grid(row = 1, column = 0, sticky = N + S + E + W)
self.userInput = ttk.Entry(self.frame)
self.userInput.grid(row = 1, column = 1, sticky = N + S + E + W)
self.passwordLabel = Label(self.frame, text = "Password", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.passwordLabel.grid(row = 2, column = 0, sticky = N + S + E + W)
self.passwordInput = ttk.Entry(self.frame, show="*")
self.passwordInput.grid(row = 2, column = 1, sticky = N + S + E + W)
		self.loginButton = Button(self.frame, text = "start session", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.sessionButton(self.userInput.get(), self.passwordInput.get()))
		self.loginButton.grid(row = 3, column = 0, columnspan = 2, sticky = N + S + E + W)
		self.beforeButton = Button(self.frame, text = "<- Back", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: MainWindow(self.window, self.frame))
		self.beforeButton.grid(row = 6, column = 0, columnspan = 2, sticky = N + S + E + W)
def sessionButton(self, user, password):
url = "https://awesometracker.ddns.net/access"
data = {
'user': user,
'password': password
}
r = requests.post(url = url, data = data)
result = r.json()
if (result and result['status'] and result['status'] == 'ok'):
TrackerWindow(self.window, self.frame, result['data'])
else:
self.clean(self.userInput, self.passwordInput)
self.errorLabel1 = Label(self.frame, text = "User or password invalid", bg = "red", relief = "solid", font = ("arial", 12, "bold"))
self.errorLabel1.grid(row = 4, column = 0, columnspan = 2, sticky = N + S + E + W)
self.errorLabel2 = Button(self.frame, text = "Forgot password", bg = "red", relief = "solid", font = ("arial", 12, "bold"), command = lambda: webbrowser.open('https://awesometracker.ddns.net/forgotPassword', new = 2))
self.errorLabel2.grid(row = 5, column = 0, columnspan = 2, sticky = N + S + E + W)
def clean(self, *campos):
for campo in campos:
campo.delete(0, "end")
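	# Shape of the /access response this client relies on (field names inferred
	# from the code above and from TrackerWindow; the exact schema is an assumption):
	#   {"status": "ok", "data": {"users.user": "<username>", "users.code": "<code>"}}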
class TrackerWindow:
def __init__(self, window, frame, user):
self.window = window
self.frame = frame
self.script = 0
self.thread = 0
if self.window == None:
self.window = Tk()
self.window.geometry("1000x750")
self.window.minsize(750, 600)
self.window.iconbitmap("media/favicon.ico")
self.window.title("AwesomeTracker")
else:
self.window.minsize(750, 600)
self.window.iconbitmap("media/favicon.ico")
self.window.title("AwesomeTracker")
if self.frame == None:
self.frame = Frame(self.window, bg = "#F4F133")
self.frame.pack(fill = BOTH, expand = 1)
else:
self.frame.pack_forget()
self.frame = Frame(self.window, bg = "#F4F133")
self.frame.pack(fill = BOTH, expand = 1)
self.generalFrame = Frame(self.frame, bg = "#84e8e8")
self.generalFrame.pack(fill = X, anchor = N)
self.menuFrame = Frame(self.frame, bg = "#EEEEEE")
self.menuFrame.pack(fill = Y, side = LEFT, anchor = N + W)
self.contentFrame = Frame(self.frame, bg = "white")
self.contentFrame.pack(fill = BOTH, anchor = CENTER, expand = 1)
self.mainLabel = Label(self.generalFrame, text = "User: " + str(user['users.user']) + " | User code: " + str(user['users.code']), bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.mainLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
self.optionsLabel = Label(self.menuFrame, text = "Options", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.optionsLabel.grid(row = 0, column = 0, sticky = N + S + E + W)
self.optionsButton = Button(self.menuFrame, text = "Actions", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.changeFrame("options", user))
self.optionsButton.grid(row = 1, column=0, sticky=N + S + E + W)
self.logOutButton = Button(self.menuFrame, text = "Log out", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.logOut())
self.logOutButton.grid(row = 2, column = 0, sticky = N + S + E + W)
def changeFrame(self, option, user,):
if (option == "options"):
self.contentFrame.pack_forget()
self.contentFrame = Frame(self.frame, bg="white")
self.contentFrame.pack(fill = BOTH, anchor = CENTER, expand = 1)
appLabel = Label(self.contentFrame, text = "Actual window: No one", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
appLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
lastAppLabel = Label(self.contentFrame, text = "Last window: No one", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
lastAppLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
startButton = Button(self.contentFrame, text = "Start Tracker", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.start(user))
startButton.pack(fill = BOTH, anchor=CENTER, expand = 1)
stopButton = Button(self.contentFrame, text = "Stop Tracker", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.stop())
stopButton.pack(fill = BOTH, anchor = CENTER, expand = 1)
statusLabel = Label(self.contentFrame, text = "Tracker status: Stopped", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
statusLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
def start(self, user):
if self.script == 0:
self.script = script.Script(user, self.contentFrame)
self.thread = threading.Thread(target = self.script.start)
self.thread.start()
def stop(self):
if self.script != 0:
self.script.stop()
self.thread.join()
self.script = 0
self.thread = 0
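	# Assumed contract of script.Script (inferred from the calls above): start()
	# runs the tracking loop on the worker thread until stop() is requested, after
	# which the thread can be joined and both references are reset to 0.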
def logOut(self):
self.stop()
LoginWindow(self.window, self.frame)
if __name__ == '__main__':
MainWindow(None , None) | from tkinter import *
from tkinter import ttk
import threading
import sys
import datetime
import script
import os
import requests
import webbrowser
class MainWindow():
def __init__(self, window, frame):
self.window = window
self.frame = frame
if window == None:
self.window = Tk()
self.window.minsize(250, 175)
self.window.iconbitmap("./media/favicon.ico")
self.window.title("AwesomeTracker")
else:
self.window.minsize(250, 175)
self.window.iconbitmap("./media/favicon.ico")
self.window.title("AwesomeTracker")
if frame == None:
self.frame = Frame(self.window, bg = "#F4F133")
self.frame.pack(fill = BOTH, expand = 1)
else:
self.frame.pack_forget()
self.frame = Frame(self.window)
self.frame.pack(fill = BOTH, expand = 1)
self.greetingsLabel = Label(self.frame, text = "Welcome to AwesomeTracker", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.greetingsLabel.pack(fill = "x")
self.versionLabel = Label(self.frame, text = "Version - V 0.1 \n\r Version notes: \n\r -Performance improvements\n\r", relief = "solid", font = ("arial", 12, "bold"))
self.versionLabel.pack(fill = BOTH, expand = 1)
self.nextButton = Button(self.frame, text = "Next ->",bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.checkCon())
self.nextButton.pack(fill = "x")
self.window.mainloop()
def checkCon(self):
hostname = "awesometracker.ddns.net"
param = '-n' if sys.platform in ['Windows', 'win32'] else '-c'
command = ['ping', param, 1, hostname]
response = os.system(' '.join(str(e) for e in command))
if response == 0:
LoginWindow(self.window, self.frame)
else:
self.internetProblem = Label(self.frame, text = "Conection error", bg = "red", relief = "solid", font = ("arial", 12, "bold"))
self.internetProblem.pack(fill = "x")
class LoginWindow():
def __init__(self, window, frame):
self.window = window
self.frame = frame
if self.window == None:
self.window = Tk()
self.window.geometry("350x200")
self.window.minsize(350, 200)
self.window.iconbitmap("media/favicon.ico")
self. window.title("AwesomeTracker")
else:
self.window.minsize(350, 200)
self.window.iconbitmap("media/favicon.ico")
self.window.title("AwesomeTracker")
if self.frame == None:
self.frame = Frame(self.window, bg = "#84e8e8", height = self.window.winfo_height(), width = self.window.winfo_width())
self.frame.pack(fill = BOTH, expand = 1)
else:
self.frame.pack_forget()
self.frame = Frame(self.window, bg = "#84e8e8", height = self.window.winfo_height(), width = self.window.winfo_width())
self.frame.pack(fill = BOTH, expand = 1)
self.frame.columnconfigure(0, weight = 1)
self.frame.columnconfigure(1, weight = 1)
self.frame.rowconfigure(0, weight = 1)
self.frame.rowconfigure(1, weight = 1)
self.frame.rowconfigure(2, weight = 1)
self.frame.rowconfigure(3, weight = 1)
self.frame.rowconfigure(4, weight = 0)
self.frame.rowconfigure(5, weight=0)
self.frame.rowconfigure(6, weight = 1)
self.greetingsLabel = Label(self.frame, text = "Log in", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.greetingsLabel.grid(row = 0, column = 0, columnspan = 2, sticky = N + S + E + W)
self.userLabel = Label(self.frame, text = "Email or user", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.userLabel.grid(row = 1, column = 0, sticky = N + S + E + W)
self.userInput = ttk.Entry(self.frame)
self.userInput.grid(row = 1, column = 1, sticky = N + S + E + W)
self.passwordLabel = Label(self.frame, text = "Password", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.passwordLabel.grid(row = 2, column = 0, sticky = N + S + E + W)
self.passwordInput = ttk.Entry(self.frame, show="*")
self.passwordInput.grid(row = 2, column = 1, sticky = N + S + E + W)
self.loginButton = Button(self.frame, text = "start sesion",bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.sessionButton(self.userInput.get(), self.passwordInput.get()))
self.loginButton.grid(row = 3, column = 0, columnspan = 2, sticky = N + S + E + W);
self.beforeButton = Button(self.frame, text = "<- Back",bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: MainWindow(self.window, self.frame))
self.beforeButton.grid(row = 6, column = 0, columnspan = 2, sticky = N + S + E + W);
def sessionButton(self, user, password):
url = "https://awesometracker.ddns.net/access"
data = {
'user': user,
'password': password
}
r = requests.post(url = url, data = data)
result = r.json()
if (result and result['status'] and result['status'] == 'ok'):
TrackerWindow(self.window, self.frame, result['data'])
else:
self.clean(self.userInput, self.passwordInput)
self.errorLabel1 = Label(self.frame, text = "User or password invalid", bg = "red", relief = "solid", font = ("arial", 12, "bold"))
self.errorLabel1.grid(row = 4, column = 0, columnspan = 2, sticky = N + S + E + W)
self.errorLabel2 = Button(self.frame, text = "Forgot password", bg = "red", relief = "solid", font = ("arial", 12, "bold"), command = lambda: webbrowser.open('https://awesometracker.ddns.net/forgotPassword', new = 2))
self.errorLabel2.grid(row = 5, column = 0, columnspan = 2, sticky = N + S + E + W)
def clean(self, *campos):
for campo in campos:
campo.delete(0, "end")
class TrackerWindow:
def __init__(self, window, frame, user):
self.window = window
self.frame = frame
self.script = 0
self.thread = 0
if self.window == None:
self.window = Tk()
self.window.geometry("1000x750")
self.window.minsize(750, 600)
self.window.iconbitmap("media/favicon.ico")
self.window.title("AwesomeTracker")
else:
self.window.minsize(750, 600)
self.window.iconbitmap("media/favicon.ico")
self.window.title("AwesomeTracker")
if self.frame == None:
self.frame = Frame(self.window, bg = "#F4F133")
self.frame.pack(fill = BOTH, expand = 1)
else:
self.frame.pack_forget()
self.frame = Frame(self.window, bg = "#F4F133")
self.frame.pack(fill = BOTH, expand = 1)
self.generalFrame = Frame(self.frame, bg = "#84e8e8")
self.generalFrame.pack(fill = X, anchor = N)
self.menuFrame = Frame(self.frame, bg = "#EEEEEE")
self.menuFrame.pack(fill = Y, side = LEFT, anchor = N + W)
self.contentFrame = Frame(self.frame, bg = "white")
self.contentFrame.pack(fill = BOTH, anchor = CENTER, expand = 1)
self.mainLabel = Label(self.generalFrame, text = "User: " + str(user['users.user']) + " | User code: " + str(user['users.code']), bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.mainLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
self.optionsLabel = Label(self.menuFrame, text = "Options", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
self.optionsLabel.grid(row = 0, column = 0, sticky = N + S + E + W)
self.optionsButton = Button(self.menuFrame, text = "Actions", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.changeFrame("options", user))
self.optionsButton.grid(row = 1, column=0, sticky=N + S + E + W)
self.logOutButton = Button(self.menuFrame, text = "Log out", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.logOut())
self.logOutButton.grid(row = 2, column = 0, sticky = N + S + E + W)
def changeFrame(self, option, user,):
if (option == "options"):
self.contentFrame.pack_forget()
self.contentFrame = Frame(self.frame, bg="white")
self.contentFrame.pack(fill = BOTH, anchor = CENTER, expand = 1)
appLabel = Label(self.contentFrame, text = "Actual window: No one", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
appLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
lastAppLabel = Label(self.contentFrame, text = "Last window: No one", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
lastAppLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
startButton = Button(self.contentFrame, text = "Start Tracker", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.start(user))
startButton.pack(fill = BOTH, anchor=CENTER, expand = 1)
stopButton = Button(self.contentFrame, text = "Stop Tracker", bg = "#84e8e8", font = ("arial", 12, "bold"), command = lambda: self.stop())
stopButton.pack(fill = BOTH, anchor = CENTER, expand = 1)
statusLabel = Label(self.contentFrame, text = "Tracker status: Stopped", bg = "#4EF037", relief = "solid", font = ("arial", 12, "bold"))
statusLabel.pack(fill = BOTH, anchor = CENTER, expand = 1)
def start(self, user):
if self.script == 0:
self.script = script.Script(user, self.contentFrame)
self.thread = threading.Thread(target = self.script.start)
self.thread.start()
def stop(self):
if self.script != 0:
self.script.stop()
self.thread.join()
self.script = 0
self.thread = 0
def logOut(self):
self.stop()
LoginWindow(self.window, self.frame)
if __name__ == '__main__':
    MainWindow(None, None)
demo/python_homework/WEEK03/homework04.py | richardmyu/CS | 0 | 6630088 | # -*- coding: utf-8 -*-
import math
# 4. How many circular primes are there below an arbitrary positive integer n?
# A circular prime stays prime under every cyclic rotation of its digits.
# Example: 197 --> 197, 971, 719
# Primality test
def is_prime(n):
counter = 0
for i in range(1, n + 1):
if n % i == 0:
counter += 1
else:
continue
if counter == 2:
return True
else:
return False
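# Note (added): the trial division above counts every divisor of n, so it is O(n).
# A common faster check (a sketch, not part of the original homework) only tests
# divisors up to sqrt(n), e.g.:
#
#   def is_prime_fast(n):
#       if n < 2:
#           return False
#       for i in range(2, math.isqrt(n) + 1):
#           if n % i == 0:
#               return False
#       return True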
# Circular-prime test
def is_cycle_prime(n):
if n <= 0:
print("输入数字有误")
return 0
if not is_prime(n):
# print("请输入素数")
return 0
str_num = str(n)
num_len = len(str_num)
num = n
is_cp = True
for i in range(num_len):
a = num // 10
b = num % 10
num = int(b * math.pow(10, num_len - 1) + a)
# print("---", num)
if is_prime(int(num)):
continue
else:
is_cp = False
break
return is_cp
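# Worked example (added comment): for n = 197 the rotations are 971 and 719, and all
# three are prime, so is_cycle_prime(197) is True; for n = 19 the rotation 91 = 7 * 13
# is composite, so is_cycle_prime(19) is False.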
# Count the circular primes below n
def count_cp(n):
if n <= 1:
print("输入数据有误")
return 0
count = 0
for i in range(n):
if is_cycle_prime(i):
# print("cycle-prime: ", i)
count += 1
return count
print(count_cp(100))
tests/candidates/test_candidates.py | Prabh06/fonduer | 0 | 6630089
#! /usr/bin/env python
import logging
import os
import pytest
from fonduer import Meta
from fonduer.candidates import CandidateExtractor, MentionExtractor, MentionNgrams
from fonduer.candidates.matchers import PersonMatcher
from fonduer.candidates.mentions import Ngrams
from fonduer.candidates.models import Candidate, candidate_subclass, mention_subclass
from fonduer.parser import Parser
from fonduer.parser.models import Document, Sentence
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from tests.shared.hardware_matchers import part_matcher, temp_matcher, volt_matcher
from tests.shared.hardware_spaces import (
MentionNgramsPart,
MentionNgramsTemp,
MentionNgramsVolt,
)
from tests.shared.hardware_throttlers import temp_throttler, volt_throttler
logger = logging.getLogger(__name__)
ATTRIBUTE = "stg_temp_max"
DB = "cand_test"
def test_ngram_split(caplog):
"""Test ngram split."""
caplog.set_level(logging.INFO)
ngrams = Ngrams()
sent = Sentence()
# When a split_token appears in the middle of the text.
sent.text = "New-Text"
sent.words = ["New-Text"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 3
assert result[0].get_span() == "New-Text"
assert result[1].get_span() == "New"
assert result[2].get_span() == "Text"
# When a text ends with a split_token.
sent.text = "New-"
sent.words = ["New-"]
result = list(ngrams.apply(sent))
assert len(result) == 2
assert result[0].get_span() == "New-"
assert result[1].get_span() == "New"
# When a text starts with a split_token.
sent.text = "-Text"
sent.words = ["-Text"]
result = list(ngrams.apply(sent))
assert len(result) == 2
assert result[0].get_span() == "-Text"
assert result[1].get_span() == "Text"
# When more than one split_token appears.
sent.text = "New/Text-Word"
sent.words = ["New/Text-Word"]
result = list(ngrams.apply(sent))
assert len(result) == 3
assert result[0].get_span() == "New/Text-Word"
assert result[1].get_span() == "New"
assert result[2].get_span() == "Text-Word"
def test_span_char_start_and_char_end(caplog):
"""Test chart_start and char_end of TemporarySpan that comes from Ngrams.apply."""
caplog.set_level(logging.INFO)
ngrams = Ngrams()
sent = Sentence()
sent.text = "BC548BG"
sent.words = ["BC548BG"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 1
assert result[0].get_span() == "BC548BG"
assert result[0].char_start == 0
assert result[0].char_end == 6
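# (Added note) As the assertions above show, a TemporarySpan uses inclusive character
# offsets: the 7-character span "BC548BG" runs from char_start=0 to char_end=6.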
def test_cand_gen(caplog):
"""Test extracting candidates from mentions from documents."""
caplog.set_level(logging.INFO)
# SpaCy on mac has issue on parallel parsing
if os.name == "posix":
logger.info("Using single core.")
PARALLEL = 1
else:
PARALLEL = 2 # Travis only gives 2 cores
max_docs = 10
session = Meta.init("postgres://localhost:5432/" + DB).Session()
docs_path = "tests/data/html/"
pdf_path = "tests/data/pdf/"
# Parsing
logger.info("Parsing...")
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(
session, structural=True, lingual=True, visual=True, pdf_path=pdf_path
)
corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
assert session.query(Document).count() == max_docs
assert session.query(Sentence).count() == 5548
docs = session.query(Document).order_by(Document.name).all()
# Mention Extraction
part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
temp_ngrams = MentionNgramsTemp(n_max=2)
volt_ngrams = MentionNgramsVolt(n_max=1)
Part = mention_subclass("Part")
Temp = mention_subclass("Temp")
Volt = mention_subclass("Volt")
with pytest.raises(ValueError):
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt],
[part_ngrams, volt_ngrams], # Fail, mismatched arity
[part_matcher, temp_matcher, volt_matcher],
)
with pytest.raises(ValueError):
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt],
[part_ngrams, temp_matcher, volt_ngrams],
[part_matcher, temp_matcher], # Fail, mismatched arity
)
mention_extractor = MentionExtractor(
session,
[Part, Temp, Volt],
[part_ngrams, temp_ngrams, volt_ngrams],
[part_matcher, temp_matcher, volt_matcher],
)
mention_extractor.apply(docs, parallelism=PARALLEL)
assert session.query(Part).count() == 234
assert session.query(Volt).count() == 107
assert session.query(Temp).count() == 125
part = session.query(Part).order_by(Part.id).all()[0]
volt = session.query(Volt).order_by(Volt.id).all()[0]
temp = session.query(Temp).order_by(Temp.id).all()[0]
logger.info("Part: {}".format(part.span))
logger.info("Volt: {}".format(volt.span))
logger.info("Temp: {}".format(temp.span))
# Candidate Extraction
PartTemp = candidate_subclass("PartTemp", [Part, Temp])
PartVolt = candidate_subclass("PartVolt", [Part, Volt])
with pytest.raises(ValueError):
candidate_extractor = CandidateExtractor(
session,
[PartTemp, PartVolt],
throttlers=[
temp_throttler,
volt_throttler,
volt_throttler,
], # Fail, mismatched arity
)
with pytest.raises(ValueError):
candidate_extractor = CandidateExtractor(
session,
[PartTemp], # Fail, mismatched arity
throttlers=[temp_throttler, volt_throttler],
)
# Test that no throttler in candidate extractor
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt]
) # Pass, no throttler
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 3654
assert session.query(PartVolt).count() == 3657
assert session.query(Candidate).count() == 7311
candidate_extractor.clear_all(split=0)
assert session.query(Candidate).count() == 0
# Test that None in throttlers in candidate extractor
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt], throttlers=[temp_throttler, None]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 3530
assert session.query(PartVolt).count() == 3657
assert session.query(Candidate).count() == 7187
candidate_extractor.clear_all(split=0)
assert session.query(Candidate).count() == 0
candidate_extractor = CandidateExtractor(
session, [PartTemp, PartVolt], throttlers=[temp_throttler, volt_throttler]
)
candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
assert session.query(PartTemp).count() == 3530
assert session.query(PartVolt).count() == 3313
assert session.query(Candidate).count() == 6843
assert docs[0].name == "112823"
assert len(docs[0].parts) == 70
assert len(docs[0].volts) == 33
assert len(docs[0].temps) == 18
# Test that deletion of a Candidate does not delete the Mention
session.query(PartTemp).delete()
assert session.query(PartTemp).count() == 0
assert session.query(Temp).count() == 125
assert session.query(Part).count() == 234
# Test deletion of Candidate if Mention is deleted
assert session.query(PartVolt).count() == 3313
assert session.query(Volt).count() == 107
session.query(Volt).delete()
assert session.query(Volt).count() == 0
assert session.query(PartVolt).count() == 0
def test_ngrams(caplog):
"""Test ngram limits in mention extraction"""
caplog.set_level(logging.INFO)
PARALLEL = 1
max_docs = 1
session = Meta.init("postgres://localhost:5432/" + DB).Session()
docs_path = "tests/data/pure_html/lincoln_short.html"
logger.info("Parsing...")
doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = Parser(session, structural=True, lingual=True)
corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
assert session.query(Document).count() == max_docs
assert session.query(Sentence).count() == 503
docs = session.query(Document).order_by(Document.name).all()
# Mention Extraction
Person = mention_subclass("Person")
person_ngrams = MentionNgrams(n_max=3)
person_matcher = PersonMatcher()
mention_extractor = MentionExtractor(
session, [Person], [person_ngrams], [person_matcher]
)
mention_extractor.apply(docs, parallelism=PARALLEL)
assert session.query(Person).count() == 126
mentions = session.query(Person).all()
assert len([x for x in mentions if x.span.get_n() == 1]) == 50
assert len([x for x in mentions if x.span.get_n() > 3]) == 0
# Test for unigram exclusion
person_ngrams = MentionNgrams(n_min=2, n_max=3)
mention_extractor = MentionExtractor(
session, [Person], [person_ngrams], [person_matcher]
)
mention_extractor.apply(docs, parallelism=PARALLEL)
assert session.query(Person).count() == 76
mentions = session.query(Person).all()
assert len([x for x in mentions if x.span.get_n() == 1]) == 0
    assert len([x for x in mentions if x.span.get_n() > 3]) == 0
Basico/ex009.py | Gustavsantos/python1 | 0 | 6630090 | n = int(input('How much would you like to spend (in reais)? '))
print('With R${} you can buy US${:.2f}'.format(n, n / 3.5))
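# (Added note) Example at the assumed fixed rate of 3.5 BRL per USD: spending 100 reais
# buys roughly 28.57 dollars.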
axelrod/strategies/lookerup.py | rjsu26/Axelrod | 0 | 6630091
from collections import namedtuple
from itertools import product
from typing import Any, TypeVar
from axelrod.action import Action, actions_to_str, str_to_actions
from axelrod.player import Player
C, D = Action.C, Action.D
Plays = namedtuple("Plays", "self_plays, op_plays, op_openings")
Reaction = TypeVar("Reaction", Action, float)
class LookupTable(object):
"""
LookerUp and its children use this object to determine their next actions.
It is an object that creates a table of all possible plays to a specified
depth and the action to be returned for each combination of plays.
The "get" method returns the appropriate response.
For the table containing::
....
Plays(self_plays=(C, C), op_plays=(C, D), op_openings=(D, C): D
Plays(self_plays=(C, C), op_plays=(C, D), op_openings=(D, D): C
...
with:
player.history[-2:]=[C, C] and
opponent.history[-2:]=[C, D] and
opponent.history[:2]=[D, D],
calling LookupTable.get(plays=(C, C), op_plays=(C, D), op_openings=(D, D))
will return C.
Instantiate the table with a lookup_dict. This is
{(self_plays_tuple, op_plays_tuple, op_openings_tuple): action, ...}.
It must contain every possible
permutation with C's and D's of the above tuple. so::
good_dict = {((C,), (C,), ()): C,
((C,), (D,), ()): C,
((D,), (C,), ()): D,
((D,), (D,), ()): C}
bad_dict = {((C,), (C,), ()): C,
((C,), (D,), ()): C,
((D,), (C,), ()): D}
LookupTable.from_pattern() creates an ordered list of keys for you and maps
the pattern to the keys.::
LookupTable.from_pattern(pattern=(C, D, D, C),
player_depth=0, op_depth=1, op_openings_depth=1
)
creates the dictionary::
{Plays(self_plays=(), op_plays=(C), op_openings=(C)): C,
Plays(self_plays=(), op_plays=(C), op_openings=(D)): D,
Plays(self_plays=(), op_plays=(D), op_openings=(C)): D,
Plays(self_plays=(), op_plays=(D), op_openings=(D)): C,}
and then returns a LookupTable with that dictionary.
"""
def __init__(self, lookup_dict: dict) -> None:
self._dict = make_keys_into_plays(lookup_dict)
sample_key = next(iter(self._dict))
self._plays_depth = len(sample_key.self_plays)
self._op_plays_depth = len(sample_key.op_plays)
self._op_openings_depth = len(sample_key.op_openings)
self._table_depth = max(
self._plays_depth, self._op_plays_depth, self._op_openings_depth
)
self._raise_error_for_bad_lookup_dict()
def _raise_error_for_bad_lookup_dict(self):
if any(
len(key.self_plays) != self._plays_depth
or len(key.op_plays) != self._op_plays_depth
or len(key.op_openings) != self._op_openings_depth
for key in self._dict
):
raise ValueError("Lookup table keys are not all the same size.")
total_key_combinations = 2 ** (
self._plays_depth + self._op_plays_depth + self._op_openings_depth
)
if total_key_combinations != len(self._dict):
msg = (
"Lookup table does not have enough keys"
+ " to cover all possibilities."
)
raise ValueError(msg)
@classmethod
def from_pattern(
cls, pattern: tuple, player_depth: int, op_depth: int, op_openings_depth: int
):
keys = create_lookup_table_keys(
player_depth=player_depth,
op_depth=op_depth,
op_openings_depth=op_openings_depth,
)
if len(keys) != len(pattern):
msg = "Pattern must be len: {}, but was len: {}".format(
len(keys), len(pattern)
)
raise ValueError(msg)
input_dict = dict(zip(keys, pattern))
return cls(input_dict)
def get(self, plays: tuple, op_plays: tuple, op_openings: tuple) -> Any:
return self._dict[
Plays(self_plays=plays, op_plays=op_plays, op_openings=op_openings)
]
@property
def player_depth(self) -> int:
return self._plays_depth
@property
def op_depth(self) -> int:
return self._op_plays_depth
@property
def op_openings_depth(self) -> int:
return self._op_openings_depth
@property
def table_depth(self) -> int:
return self._table_depth
@property
def dictionary(self) -> dict:
return self._dict.copy()
def display(
self, sort_by: tuple = ("op_openings", "self_plays", "op_plays")
) -> str:
"""
Returns a string for printing lookup_table info in specified order.
:param sort_by: only_elements='self_plays', 'op_plays', 'op_openings'
"""
def sorter(plays):
return tuple(actions_to_str(getattr(plays, field) for field in sort_by))
col_width = 11
sorted_keys = sorted(self._dict, key=sorter)
header_line = (
"{str_list[0]:^{width}}|"
+ "{str_list[1]:^{width}}|"
+ "{str_list[2]:^{width}}"
)
display_line = header_line.replace("|", ",") + ": {str_list[3]},"
def make_commaed_str(action_tuple):
return ", ".join(str(action) for action in action_tuple)
line_elements = [
(
make_commaed_str(getattr(key, sort_by[0])),
make_commaed_str(getattr(key, sort_by[1])),
make_commaed_str(getattr(key, sort_by[2])),
self._dict[key],
)
for key in sorted_keys
]
header = header_line.format(str_list=sort_by, width=col_width) + "\n"
lines = [
display_line.format(str_list=line, width=col_width)
for line in line_elements
]
return header + "\n".join(lines) + "\n"
def __eq__(self, other) -> bool:
if not isinstance(other, LookupTable):
return False
return self._dict == other.dictionary
def make_keys_into_plays(lookup_table: dict) -> dict:
"""Returns a dict where all keys are Plays."""
new_table = lookup_table.copy()
if any(not isinstance(key, Plays) for key in new_table):
new_table = {Plays(*key): value for key, value in new_table.items()}
return new_table
def create_lookup_table_keys(
player_depth: int, op_depth: int, op_openings_depth: int
) -> list:
"""Returns a list of Plays that has all possible permutations of C's and
D's for each specified depth. the list is in order,
C < D sorted by ((player_tuple), (op_tuple), (op_openings_tuple)).
create_lookup_keys(2, 1, 0) returns::
[Plays(self_plays=(C, C), op_plays=(C,), op_openings=()),
Plays(self_plays=(C, C), op_plays=(D,), op_openings=()),
Plays(self_plays=(C, D), op_plays=(C,), op_openings=()),
Plays(self_plays=(C, D), op_plays=(D,), op_openings=()),
Plays(self_plays=(D, C), op_plays=(C,), op_openings=()),
Plays(self_plays=(D, C), op_plays=(D,), op_openings=()),
Plays(self_plays=(D, D), op_plays=(C,), op_openings=()),
Plays(self_plays=(D, D), op_plays=(D,), op_openings=())]
"""
self_plays = product((C, D), repeat=player_depth)
op_plays = product((C, D), repeat=op_depth)
op_openings = product((C, D), repeat=op_openings_depth)
iterator = product(self_plays, op_plays, op_openings)
return [Plays(*plays_tuple) for plays_tuple in iterator]
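# --- Illustrative sketch (added for clarity; not part of the original Axelrod source). ---
# It builds the Tit-For-Tat table described in the LookerUp docstring below via
# LookupTable.from_pattern and queries it once.
def _example_tit_for_tat_table() -> LookupTable:
    # player_depth=0, op_depth=1, op_openings_depth=0 yields exactly two keys,
    # Plays((), (C,), ()) and Plays((), (D,), ()), in that order, so the pattern
    # (C, D) means "cooperate after a C, defect after a D".
    table = LookupTable.from_pattern(
        pattern=(C, D), player_depth=0, op_depth=1, op_openings_depth=0
    )
    assert table.get(plays=(), op_plays=(D,), op_openings=()) == D
    return table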
class LookerUp(Player):
"""
This strategy uses a LookupTable to decide its next action. If there is not
enough history to use the table, it calls from a list of
self.initial_actions.
if self_depth=2, op_depth=3, op_openings_depth=5, LookerUp finds the last 2
plays of self, the last 3 plays of opponent and the opening 5 plays of
opponent. It then looks those up on the LookupTable and returns the
appropriate action. If 5 rounds have not been played (the minimum required
for op_openings_depth), it calls from self.initial_actions.
LookerUp can be instantiated with a dictionary. The dictionary uses
tuple(tuple, tuple, tuple) or Plays as keys. for example.
- self_plays: depth=2
- op_plays: depth=1
- op_openings: depth=0::
{Plays((C, C), (C), ()): C,
Plays((C, C), (D), ()): D,
Plays((C, D), (C), ()): D, <- example below
Plays((C, D), (D), ()): D,
Plays((D, C), (C), ()): C,
Plays((D, C), (D), ()): D,
Plays((D, D), (C), ()): C,
Plays((D, D), (D), ()): D}
From the above table, if the player last played C, D and the opponent last
played C (here the initial opponent play is ignored) then this round,
the player would play D.
The dictionary must contain all possible permutations of C's and D's.
LookerUp can also be instantiated with `pattern=str/tuple` of actions, and::
parameters=Plays(
self_plays=player_depth: int,
op_plays=op_depth: int,
op_openings=op_openings_depth: int)
It will create keys of len=2 ** (sum(parameters)) and map the pattern to
the keys.
initial_actions is a tuple such as (C, C, D). A table needs initial actions
equal to max(self_plays depth, opponent_plays depth, opponent_initial_plays
depth). If provided initial_actions is too long, the extra will be ignored.
If provided initial_actions is too short, the shortfall will be made up
with C's.
Some well-known strategies can be expressed as special cases; for example
Cooperator is given by the dict (All history is ignored and always play C)::
{Plays((), (), ()) : C}
Tit-For-Tat is given by (The only history that is important is the
opponent's last play.)::
{Plays((), (D,), ()): D,
Plays((), (C,), ()): C}
LookerUp's LookupTable defaults to Tit-For-Tat. The initial_actions
defaults to playing C.
Names:
- Lookerup: Original name by <NAME>
"""
name = "LookerUp"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
default_tft_lookup_table = {
Plays(self_plays=(), op_plays=(D,), op_openings=()): D,
Plays(self_plays=(), op_plays=(C,), op_openings=()): C,
}
def __init__(
self,
lookup_dict: dict = None,
initial_actions: tuple = None,
pattern: Any = None, # pattern is str or tuple of Action's.
parameters: Plays = None,
) -> None:
super().__init__()
self._lookup = self._get_lookup_table(lookup_dict, pattern, parameters)
self._set_memory_depth()
self.initial_actions = self._get_initial_actions(initial_actions)
self._initial_actions_pool = list(self.initial_actions)
def _get_lookup_table(
self, lookup_dict: dict, pattern: Any, parameters: tuple
) -> LookupTable:
if lookup_dict:
return LookupTable(lookup_dict=lookup_dict)
if pattern is not None and parameters is not None:
if isinstance(pattern, str):
pattern = str_to_actions(pattern)
self_depth, op_depth, op_openings_depth = parameters
return LookupTable.from_pattern(
pattern, self_depth, op_depth, op_openings_depth
)
return LookupTable(self.default_tft_lookup_table)
def _set_memory_depth(self):
if self._lookup.op_openings_depth == 0:
self.classifier["memory_depth"] = self._lookup.table_depth
else:
self.classifier["memory_depth"] = float("inf")
def _get_initial_actions(self, initial_actions: tuple) -> tuple:
"""Initial actions will always be cut down to table_depth."""
table_depth = self._lookup.table_depth
if not initial_actions:
return tuple([C] * table_depth)
initial_actions_shortfall = table_depth - len(initial_actions)
if initial_actions_shortfall > 0:
return initial_actions + tuple([C] * initial_actions_shortfall)
return initial_actions[:table_depth]
def strategy(self, opponent: Player) -> Reaction:
turn_index = len(opponent.history)
while turn_index < len(self._initial_actions_pool):
return self._initial_actions_pool[turn_index]
player_last_n_plays = get_last_n_plays(
player=self, depth=self._lookup.player_depth
)
opponent_last_n_plays = get_last_n_plays(
player=opponent, depth=self._lookup.op_depth
)
opponent_initial_plays = tuple(
opponent.history[: self._lookup.op_openings_depth]
)
return self._lookup.get(
player_last_n_plays, opponent_last_n_plays, opponent_initial_plays
)
@property
def lookup_dict(self):
return self._lookup.dictionary
def lookup_table_display(
self, sort_by: tuple = ("op_openings", "self_plays", "op_plays")
) -> str:
"""
Returns a string for printing lookup_table info in specified order.
:param sort_by: only_elements='self_plays', 'op_plays', 'op_openings'
"""
return self._lookup.display(sort_by=sort_by)
class EvolvedLookerUp1_1_1(LookerUp):
"""
A 1 1 1 Lookerup trained with an evolutionary algorithm.
Names:
- Evolved Lookerup 1 1 1: Original name by <NAME>
"""
name = "EvolvedLookerUp1_1_1"
def __init__(self) -> None:
params = Plays(self_plays=1, op_plays=1, op_openings=1)
super().__init__(parameters=params, pattern="CDDDDCDD", initial_actions=(C,))
class EvolvedLookerUp2_2_2(LookerUp):
"""
A 2 2 2 Lookerup trained with an evolutionary algorithm.
Names:
- Evolved Lookerup 2 2 2: Original name by <NAME>
"""
name = "EvolvedLookerUp2_2_2"
def __init__(self) -> None:
params = Plays(self_plays=2, op_plays=2, op_openings=2)
pattern = "CDDCDCDDCDDDCDDDDDCDCDCCCDDCCDCDDDCCCCCDDDCDDDDDDDDDCCDDCDDDCCCD"
super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C))
class Winner12(LookerUp):
"""
A lookup table based strategy.
Names:
- Winner12: [Mathieu2015]_
"""
name = "Winner12"
def __init__(self) -> None:
params = Plays(self_plays=1, op_plays=2, op_openings=0)
pattern = "CDCDDCDD"
super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C))
class Winner21(LookerUp):
"""
A lookup table based strategy.
Names:
- Winner21: [Mathieu2015]_
"""
name = "Winner21"
def __init__(self) -> None:
params = Plays(self_plays=1, op_plays=2, op_openings=0)
pattern = "CDCDCDDD"
super().__init__(parameters=params, pattern=pattern, initial_actions=(D, C))
def get_last_n_plays(player: Player, depth: int) -> tuple:
"""Returns the last N plays of player as a tuple."""
if depth == 0:
return ()
    return tuple(player.history[-1 * depth :])
planetmint/web/views/base.py | liviu-lesan/planetmint | 0 | 6630092
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Common classes and methods for API handlers
"""
import logging
from flask import jsonify, request
from planetmint.config import Config
logger = logging.getLogger(__name__)
def make_error(status_code, message=None):
if status_code == 404 and message is None:
message = 'Not found'
response_content = {'status': status_code, 'message': message}
request_info = {'method': request.method, 'path': request.path}
request_info.update(response_content)
logger.error('HTTP API error: %(status)s - %(method)s:%(path)s - %(message)s', request_info)
response = jsonify(response_content)
response.status_code = status_code
return response
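# (Added note) Hypothetical usage inside a Flask view -- not code from this module:
#   return make_error(404)                          # -> {"status": 404, "message": "Not found"}
#   return make_error(400, "Invalid transaction")   # -> {"status": 400, "message": "Invalid transaction"}
# The call must run inside a request context, since it reads flask.request.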
def base_ws_uri():
"""Base websocket URL that is advertised to external clients.
Useful when the websocket URL advertised to the clients needs to be
customized (typically when running behind NAT, firewall, etc.)
"""
config_wsserver = Config().get()['wsserver']
scheme = config_wsserver['advertised_scheme']
host = config_wsserver['advertised_host']
port = config_wsserver['advertised_port']
    return '{}://{}:{}'.format(scheme, host, port)
bot/src/globibot/lib/web/handlers.py | best-coloc-ever/globibot | 14 | 6630093
from tornado.web import RequestHandler
from . import constants as c
class ContextHandler(RequestHandler):
def initialize(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
class SessionHandler(ContextHandler):
def get_current_user(self):
cookie = self.get_secure_cookie(c.USER_COOKIE_NAME)
if cookie:
user_id = cookie.decode('ascii')
            return self.bot.find_user(user_id)
world.py | ryansturmer/CraftBot | 3 | 6630094 | # gcc -std=c99 -O3 -shared -o world \
# -I src -I deps/noise deps/noise/noise.c src/world.c
from ctypes import CDLL, CFUNCTYPE, c_int, c_void_p
from collections import OrderedDict
try:
dll = CDLL('./world')
except:
import os
os.system('gcc -std=c99 -O3 -shared -o world.dll -I src -I deps/noise deps/noise/noise.c src/world.c')
dll = CDLL('./world')
WORLD_FUNC = CFUNCTYPE(None, c_int, c_int, c_int, c_int, c_void_p)
def dll_seed(x):
dll.seed(x)
def dll_create_world(p, q):
result = {}
def world_func(x, y, z, w, arg):
result[(x, y, z)] = w
dll.create_world(p, q, WORLD_FUNC(world_func), None)
return result
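# Illustrative sketch (added; not part of the original CraftBot module): generating one
# chunk directly through the ctypes wrappers above. It assumes the compiled `world`
# library is present next to this file, as the CDLL loading code requires anyway.
def _example_generate_chunk(p=0, q=0, seed=42):
    dll_seed(seed)                   # fix the terrain noise seed
    blocks = dll_create_world(p, q)  # dict mapping (x, y, z) -> block type w
    return len(blocks)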
class World(object):
def __init__(self, seed=None, cache_size=512):
self.seed = seed
self.cache = OrderedDict()
self.cache_size = cache_size
def create_chunk(self, p, q):
if self.seed is not None:
dll_seed(self.seed)
return dll_create_world(p, q)
def get_chunk(self, p, q):
try:
chunk = self.cache.pop((p, q))
except KeyError:
chunk = self.create_chunk(p, q)
self.cache[(p, q)] = chunk
if len(self.cache) > self.cache_size:
self.cache.popitem(False)
return chunk
| # gcc -std=c99 -O3 -shared -o world \
# -I src -I deps/noise deps/noise/noise.c src/world.c
from ctypes import CDLL, CFUNCTYPE, c_int, c_void_p
from collections import OrderedDict
try:
dll = CDLL('./world')
except:
import os
os.system('gcc -std=c99 -O3 -shared -o world.dll -I src -I deps/noise deps/noise/noise.c src/world.c')
dll = CDLL('./world')
WORLD_FUNC = CFUNCTYPE(None, c_int, c_int, c_int, c_int, c_void_p)
def dll_seed(x):
dll.seed(x)
def dll_create_world(p, q):
result = {}
def world_func(x, y, z, w, arg):
result[(x, y, z)] = w
dll.create_world(p, q, WORLD_FUNC(world_func), None)
return result
class World(object):
def __init__(self, seed=None, cache_size=512):
self.seed = seed
self.cache = OrderedDict()
self.cache_size = cache_size
def create_chunk(self, p, q):
if self.seed is not None:
dll_seed(self.seed)
return dll_create_world(p, q)
def get_chunk(self, p, q):
try:
chunk = self.cache.pop((p, q))
except KeyError:
chunk = self.create_chunk(p, q)
self.cache[(p, q)] = chunk
if len(self.cache) > self.cache_size:
self.cache.popitem(False)
return chunk
| uk | 0.132689 | # gcc -std=c99 -O3 -shared -o world \ # -I src -I deps/noise deps/noise/noise.c src/world.c | 2.108246 | 2 |
apps/accounts/emails.py | TransparentHealth/bluebutton-web-server | 1 | 6630095 | <reponame>TransparentHealth/bluebutton-web-server
import random
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
logger = logging.getLogger('hhs_server.%s' % __name__)
def random_secret(y=40):
return ''.join(random.choice('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789') for x in range(y))
def notify_admin_of_invite_request(request_invite):
plaintext = get_template('email-invite-request-received.txt')
htmly = get_template('email-invite-request-received.html')
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"EMAIL": request_invite.email,
"FIRST_NAME": request_invite.first_name,
"LAST_NAME": request_invite.last_name,
"USER_TYPE": request_invite.user_type
}
subject = '[%s] Request for %s access from : %s %s' % (settings.APPLICATION_TITLE,
request_invite.user_type,
request_invite.first_name,
request_invite.last_name)
from_email = settings.DEFAULT_FROM_EMAIL
if settings.DEFAULT_FROM_EMAIL == settings.DEFAULT_ADMIN_EMAIL:
to_email = [settings.DEFAULT_ADMIN_EMAIL]
else:
to_email = [settings.DEFAULT_ADMIN_EMAIL, settings.DEFAULT_FROM_EMAIL]
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(subject, text_content, from_email, to_email)
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_invite_to_create_account(invitation):
plaintext = get_template('email-invite.txt')
htmly = get_template('email-invite.html')
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"CODE": invitation.code,
"URL": invitation.url(),
"EMAIL": invitation.email,
}
subject = '[%s] Invitation Code: %s' % (settings.APPLICATION_TITLE,
invitation.code)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = invitation.email
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_invitation_code_to_user(user_code_invitation):
plaintext = get_template('email-user-code-by-email.txt')
htmly = get_template('email-user-code-by-email.html')
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"CODE": user_code_invitation.code,
"URL": user_code_invitation.url(),
"EMAIL": user_code_invitation.email}
subject = '[%s] Invitation Code: %s' % (settings.APPLICATION_TITLE,
user_code_invitation.code)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user_code_invitation.email
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def mfa_via_email(user, code):
subject = '[%s] Your code for access to' % (settings.APPLICATION_TITLE)
from_email = settings.DEFAULT_FROM_EMAIL
to = user.email
html_content = """'
<P>
Provide this code on the authentication screen in your browser:<br>
%s
</p>
<p>
Thank you,
</p>
<p>
The %s Team
</P>
""" % (code, settings.APPLICATION_TITLE)
text_content = """
Provide this code on the authentication screen in your browser:
%s
Thank you,
The %s Team
""" % (code, settings.APPLICATION_TITLE)
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, 'text/html')
msg.send()
def send_password_reset_url_via_email(user, reset_key):
plaintext = get_template('email-password-reset-link.txt')
htmly = get_template('email-password-reset-link.html')
subject = '[%s] Link to reset your password' % (settings.APPLICATION_TITLE)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user.email
password_reset_link = '%s%s' % (get_hostname(),
reverse('password_reset_email_verify',
args=(reset_key,)))
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"FIRST_NAME": user.first_name,
"LAST_NAME": user.last_name,
"PASSWORD_RESET_LINK": password_reset_link}
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_activation_key_via_email(user, signup_key):
"""Do not call this directly. Instead use create_signup_key in utils."""
plaintext = get_template('email-activate.txt')
htmly = get_template('email-activate.html')
subject = '[%s] Verify your email to complete account signup' % (
settings.APPLICATION_TITLE)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user.email
activation_link = '%s%s' % (get_hostname(),
reverse('activation_verify',
args=(signup_key,)))
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"FIRST_NAME": user.first_name,
"LAST_NAME": user.last_name,
"ACTIVATION_LINK": activation_link}
subject = '[%s] Verify your email to complete your account setup.' % (
settings.APPLICATION_TITLE)
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_invite_request_notices(invite_request):
subject = '[%s] Invitation Request Received' % (settings.ORGANIZATION_NAME)
from_email = settings.DEFAULT_FROM_EMAIL
to = invite_request.email
if invite_request.user_type == "DEV":
u_type = "<p>Thank you for your application to join the %s " \
"Developer Community.</p>" % settings.ORGANIZATION_NAME
else:
u_type = "<p>Welcome to the %s " \
"Community. We are excited to help you connect " \
"your Medicare information with a growing library of " \
"health applications.</p>" % settings.ORGANIZATION_NAME
html_content = """
<p>
Hello: %s %s,
</p>
<p>
Your request for an invite to the %s (%s) has been received.
</p>
%s
<p>
We will email you when your invitation code is ready.
Please be patient.
</p>
<p>
Thank You,
</p>
<p>
The %s Team
</p>
""" % (invite_request.first_name,
invite_request.last_name,
settings.ORGANIZATION_NAME,
get_hostname(),
u_type,
settings.ORGANIZATION_NAME)
text_content = """Hello: %s %s,
Your request for an invite to %s (%s) has been received.
""" % (invite_request.first_name,
invite_request.last_name,
settings.ORGANIZATION_NAME,
get_hostname())
msg = EmailMultiAlternatives(subject, text_content, from_email,
[to, settings.INVITE_REQUEST_ADMIN])
msg.attach_alternative(html_content, 'text/html')
msg.send()
def get_hostname():
hostname = getattr(settings, 'HOSTNAME_URL', 'http://localhost:8000')
if "http://" in hostname.lower():
pass
elif "https://" in hostname.lower():
pass
else:
logger.debug("HOSTNAME_URL [%s] "
"does not contain http or https prefix. "
"Issuer:%s" % (settings.HOSTNAME_URL, hostname))
# no http/https prefix in HOST_NAME_URL so we add it
hostname = "https://%s" % (hostname)
return hostname
| import random
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
logger = logging.getLogger('hhs_server.%s' % __name__)
def random_secret(y=40):
return ''.join(random.choice('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789') for x in range(y))
def notify_admin_of_invite_request(request_invite):
plaintext = get_template('email-invite-request-received.txt')
htmly = get_template('email-invite-request-received.html')
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"EMAIL": request_invite.email,
"FIRST_NAME": request_invite.first_name,
"LAST_NAME": request_invite.last_name,
"USER_TYPE": request_invite.user_type
}
subject = '[%s] Request for %s access from : %s %s' % (settings.APPLICATION_TITLE,
request_invite.user_type,
request_invite.first_name,
request_invite.last_name)
from_email = settings.DEFAULT_FROM_EMAIL
if settings.DEFAULT_FROM_EMAIL == settings.DEFAULT_ADMIN_EMAIL:
to_email = [settings.DEFAULT_ADMIN_EMAIL]
else:
to_email = [settings.DEFAULT_ADMIN_EMAIL, settings.DEFAULT_FROM_EMAIL]
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(subject, text_content, from_email, to_email)
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_invite_to_create_account(invitation):
plaintext = get_template('email-invite.txt')
htmly = get_template('email-invite.html')
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"CODE": invitation.code,
"URL": invitation.url(),
"EMAIL": invitation.email,
}
subject = '[%s] Invitation Code: %s' % (settings.APPLICATION_TITLE,
invitation.code)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = invitation.email
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_invitation_code_to_user(user_code_invitation):
plaintext = get_template('email-user-code-by-email.txt')
htmly = get_template('email-user-code-by-email.html')
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"CODE": user_code_invitation.code,
"URL": user_code_invitation.url(),
"EMAIL": user_code_invitation.email}
subject = '[%s] Invitation Code: %s' % (settings.APPLICATION_TITLE,
user_code_invitation.code)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user_code_invitation.email
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def mfa_via_email(user, code):
subject = '[%s] Your code for access to' % (settings.APPLICATION_TITLE)
from_email = settings.DEFAULT_FROM_EMAIL
to = user.email
html_content = """'
<P>
Provide this code on the authentication screen in your browser:<br>
%s
</p>
<p>
Thank you,
</p>
<p>
The %s Team
</P>
""" % (code, settings.APPLICATION_TITLE)
text_content = """
Provide this code on the authentication screen in your browser:
%s
Thank you,
The %s Team
""" % (code, settings.APPLICATION_TITLE)
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, 'text/html')
msg.send()
def send_password_reset_url_via_email(user, reset_key):
plaintext = get_template('email-password-reset-link.txt')
htmly = get_template('email-password-reset-link.html')
subject = '[%s] Link to reset your password' % (settings.APPLICATION_TITLE)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user.email
password_reset_link = '%s%s' % (get_hostname(),
reverse('password_reset_email_verify',
args=(reset_key,)))
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"FIRST_NAME": user.first_name,
"LAST_NAME": user.last_name,
"PASSWORD_RESET_LINK": password_reset_link}
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_activation_key_via_email(user, signup_key):
"""Do not call this directly. Instead use create_signup_key in utils."""
plaintext = get_template('email-activate.txt')
htmly = get_template('email-activate.html')
subject = '[%s] Verify your email to complete account signup' % (
settings.APPLICATION_TITLE)
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user.email
activation_link = '%s%s' % (get_hostname(),
reverse('activation_verify',
args=(signup_key,)))
context = {"APPLICATION_TITLE": settings.APPLICATION_TITLE,
"FIRST_NAME": user.first_name,
"LAST_NAME": user.last_name,
"ACTIVATION_LINK": activation_link}
subject = '[%s] Verify your email to complete your account setup.' % (
settings.APPLICATION_TITLE)
text_content = plaintext.render(context)
html_content = htmly.render(context)
msg = EmailMultiAlternatives(
subject, text_content, from_email, [
to_email, ])
msg.attach_alternative(html_content, "text/html")
msg.send()
def send_invite_request_notices(invite_request):
subject = '[%s] Invitation Request Received' % (settings.ORGANIZATION_NAME)
from_email = settings.DEFAULT_FROM_EMAIL
to = invite_request.email
if invite_request.user_type == "DEV":
u_type = "<p>Thank you for your application to join the %s " \
"Developer Community.</p>" % settings.ORGANIZATION_NAME
else:
u_type = "<p>Welcome to the %s " \
"Community. We are excited to help you connect " \
"your Medicare information with a growing library of " \
"health applications.</p>" % settings.ORGANIZATION_NAME
html_content = """
<p>
Hello: %s %s,
</p>
<p>
Your request for an invite to the %s (%s) has been received.
</p>
%s
<p>
We will email you when your invitation code is ready.
Please be patient.
</p>
<p>
Thank You,
</p>
<p>
The %s Team
</p>
""" % (invite_request.first_name,
invite_request.last_name,
settings.ORGANIZATION_NAME,
get_hostname(),
u_type,
settings.ORGANIZATION_NAME)
text_content = """Hello: %s %s,
Your request for an invite to %s (%s) has been received.
""" % (invite_request.first_name,
invite_request.last_name,
settings.ORGANIZATION_NAME,
get_hostname())
msg = EmailMultiAlternatives(subject, text_content, from_email,
[to, settings.INVITE_REQUEST_ADMIN])
msg.attach_alternative(html_content, 'text/html')
msg.send()
def get_hostname():
hostname = getattr(settings, 'HOSTNAME_URL', 'http://localhost:8000')
if "http://" in hostname.lower():
pass
elif "https://" in hostname.lower():
pass
else:
logger.debug("HOSTNAME_URL [%s] "
"does not contain http or https prefix. "
"Issuer:%s" % (settings.HOSTNAME_URL, hostname))
# no http/https prefix in HOST_NAME_URL so we add it
hostname = "https://%s" % (hostname)
return hostname | en | 0.744263 | ' <P> Provide this code on the authentication screen in your browser:<br> %s </p> <p> Thank you, </p> <p> The %s Team </P> Provide this code on the authentication screen in your browser: %s Thank you, The %s Team Do not call this directly. Instead use create_signup_key in utils. <p> Hello: %s %s, </p> <p> Your request for an invite to the %s (%s) has been received. </p> %s <p> We will email you when your invitation code is ready. Please be patient. </p> <p> Thank You, </p> <p> The %s Team </p> Hello: %s %s, Your request for an invite to %s (%s) has been received. # no http/https prefix in HOST_NAME_URL so we add it | 2.165809 | 2 |
Demo/pdist/rcsclient.py | 1byte2bytes/cpython | 5 | 6630096 | <gh_stars>1-10
"""Customize this file to change the default client etc.
(In general, it is probably be better to make local operation the
default and to require something like an RCSSERVER environment
variable to enable remote operation.)
"""
import string
import os
# These defaults don't belong here -- they should be taken from the
# environment or from a hidden file in the current directory
HOST = 'voorn.cwi.nl'
PORT = 4127
VERBOSE = 1
LOCAL = 0
import client
class RCSProxyClient(client.SecureClient):
def __init__(self, address, verbose = client.VERBOSE):
client.SecureClient.__init__(self, address, verbose)
def openrcsclient(opts = []):
"open an RCSProxy client based on a list of options returned by getopt"
import RCSProxy
host = HOST
port = PORT
verbose = VERBOSE
local = LOCAL
directory = None
for o, a in opts:
if o == '-h':
host = a
if ':' in host:
i = string.find(host, ':')
host, p = host[:i], host[i+1:]
if p:
port = string.atoi(p)
if o == '-p':
port = string.atoi(a)
if o == '-d':
directory = a
if o == '-v':
verbose = verbose + 1
if o == '-q':
verbose = 0
if o == '-L':
local = 1
if local:
import RCSProxy
x = RCSProxy.RCSProxyLocal()
else:
address = (host, port)
x = RCSProxyClient(address, verbose)
if not directory:
try:
directory = open(os.path.join("CVS", "Repository")).readline()
except IOError:
pass
else:
if directory[-1] == '\n':
directory = directory[:-1]
if directory:
x.cd(directory)
return x
| """Customize this file to change the default client etc.
(In general, it is probably be better to make local operation the
default and to require something like an RCSSERVER environment
variable to enable remote operation.)
"""
import string
import os
# These defaults don't belong here -- they should be taken from the
# environment or from a hidden file in the current directory
HOST = 'voorn.cwi.nl'
PORT = 4127
VERBOSE = 1
LOCAL = 0
import client
class RCSProxyClient(client.SecureClient):
def __init__(self, address, verbose = client.VERBOSE):
client.SecureClient.__init__(self, address, verbose)
def openrcsclient(opts = []):
"open an RCSProxy client based on a list of options returned by getopt"
import RCSProxy
host = HOST
port = PORT
verbose = VERBOSE
local = LOCAL
directory = None
for o, a in opts:
if o == '-h':
host = a
if ':' in host:
i = string.find(host, ':')
host, p = host[:i], host[i+1:]
if p:
port = string.atoi(p)
if o == '-p':
port = string.atoi(a)
if o == '-d':
directory = a
if o == '-v':
verbose = verbose + 1
if o == '-q':
verbose = 0
if o == '-L':
local = 1
if local:
import RCSProxy
x = RCSProxy.RCSProxyLocal()
else:
address = (host, port)
x = RCSProxyClient(address, verbose)
if not directory:
try:
directory = open(os.path.join("CVS", "Repository")).readline()
except IOError:
pass
else:
if directory[-1] == '\n':
directory = directory[:-1]
if directory:
x.cd(directory)
return x | en | 0.857727 | Customize this file to change the default client etc. (In general, it is probably be better to make local operation the default and to require something like an RCSSERVER environment variable to enable remote operation.) # These defaults don't belong here -- they should be taken from the # environment or from a hidden file in the current directory | 2.332536 | 2 |
tests/utils/test_utilities.py | iamchetry/DataChallenge-Fall2021 | 1 | 6630097 | <filename>tests/utils/test_utilities.py
import numpy as np
import pytest
import time
from chemml.utils import list_del_indices
from chemml.utils import std_datetime_str
from chemml.utils import tot_exec_time_str
from chemml.utils import chunk
from chemml.utils import bool_formatter
from chemml.utils import regression_metrics
def test_list_del_indices():
mylist = list_del_indices([9,3,5,7,1], [4,2])
assert len(mylist) == 3
assert mylist == [9, 3, 7]
def test_std_datetime_str():
s = std_datetime_str(mode = 'datetime')
assert s[-3] == ':'
s = std_datetime_str(mode = 'date')
assert s[-3] == '-'
s = std_datetime_str(mode = 'time')
assert s[-3] == ':'
s = std_datetime_str(mode = 'datetime_ms')
assert s[4] == '-'
s = std_datetime_str(mode = 'time_ms')
assert s[2] == ':'
with pytest.raises(ValueError):
std_datetime_str(mode='hour')
def test_exec_time_str():
time_start = time.time()
time.sleep(0.5)
s = tot_exec_time_str(time_start)
assert int(s[-4]) >= 5
def test_chunk():
x = np.array(range(10))
y = np.array(range(20,30))
it = chunk(range(len(x)), 3, x, y)
x_chunk, y_chunk = next(it)
assert len(x_chunk) == 4
def test_bool_formatter():
bf_true = bool_formatter(True)
bf_false = bool_formatter(False)
assert bf_true == 'true'
assert bf_false == 'false'
def test_bool_formatter_exception():
with pytest.raises(ValueError):
bool_formatter('true')
def test_regression_metrics():
metrics = regression_metrics([i for i in range(1,10)],[i for i in range(11,20)])
| <filename>tests/utils/test_utilities.py
import numpy as np
import pytest
import time
from chemml.utils import list_del_indices
from chemml.utils import std_datetime_str
from chemml.utils import tot_exec_time_str
from chemml.utils import chunk
from chemml.utils import bool_formatter
from chemml.utils import regression_metrics
def test_list_del_indices():
mylist = list_del_indices([9,3,5,7,1], [4,2])
assert len(mylist) == 3
assert mylist == [9, 3, 7]
def test_std_datetime_str():
s = std_datetime_str(mode = 'datetime')
assert s[-3] == ':'
s = std_datetime_str(mode = 'date')
assert s[-3] == '-'
s = std_datetime_str(mode = 'time')
assert s[-3] == ':'
s = std_datetime_str(mode = 'datetime_ms')
assert s[4] == '-'
s = std_datetime_str(mode = 'time_ms')
assert s[2] == ':'
with pytest.raises(ValueError):
std_datetime_str(mode='hour')
def test_exec_time_str():
time_start = time.time()
time.sleep(0.5)
s = tot_exec_time_str(time_start)
assert int(s[-4]) >= 5
def test_chunk():
x = np.array(range(10))
y = np.array(range(20,30))
it = chunk(range(len(x)), 3, x, y)
x_chunk, y_chunk = next(it)
assert len(x_chunk) == 4
def test_bool_formatter():
bf_true = bool_formatter(True)
bf_false = bool_formatter(False)
assert bf_true == 'true'
assert bf_false == 'false'
def test_bool_formatter_exception():
with pytest.raises(ValueError):
bool_formatter('true')
def test_regression_metrics():
metrics = regression_metrics([i for i in range(1,10)],[i for i in range(11,20)])
| none | 1 | 2.040728 | 2 |
|
api/app/models/period.py | countable-web/queue-management | 0 | 6630098 | '''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from qsystem import db
from .base import Base
class Period(Base):
period_id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
sr_id = db.Column(db.Integer, db.ForeignKey('servicereq.sr_id'), nullable=False)
csr_id = db.Column(db.Integer, db.ForeignKey('csr.csr_id'), nullable=False)
reception_csr_ind = db.Column(db.Integer, nullable=False)
ps_id = db.Column(db.Integer, db.ForeignKey('periodstate.ps_id'), nullable=False)
time_start = db.Column(db.DateTime, nullable=False)
time_end = db.Column(db.DateTime, nullable=True)
csr = db.relationship("CSR", lazy='joined')
ps = db.relationship("PeriodState", lazy='joined')
sr = db.relationship("ServiceReq", lazy='joined')
def __repr__(self):
return '<Period id:(name={self.period_id!r})>'.format(self=self)
def __init__(self, **kwargs):
super(Period, self).__init__(**kwargs)
| '''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from qsystem import db
from .base import Base
class Period(Base):
period_id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
sr_id = db.Column(db.Integer, db.ForeignKey('servicereq.sr_id'), nullable=False)
csr_id = db.Column(db.Integer, db.ForeignKey('csr.csr_id'), nullable=False)
reception_csr_ind = db.Column(db.Integer, nullable=False)
ps_id = db.Column(db.Integer, db.ForeignKey('periodstate.ps_id'), nullable=False)
time_start = db.Column(db.DateTime, nullable=False)
time_end = db.Column(db.DateTime, nullable=True)
csr = db.relationship("CSR", lazy='joined')
ps = db.relationship("PeriodState", lazy='joined')
sr = db.relationship("ServiceReq", lazy='joined')
def __repr__(self):
return '<Period id:(name={self.period_id!r})>'.format(self=self)
def __init__(self, **kwargs):
super(Period, self).__init__(**kwargs)
| en | 0.863408 | Copyright 2018 Province of British Columbia Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.809609 | 2 |
hoodapp/migrations/0002_auto_20190808_0052.py | CollinsMuiruri/final-is | 0 | 6630099 | <filename>hoodapp/migrations/0002_auto_20190808_0052.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-07 21:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hoodapp', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='business',
options={'verbose_name_plural': 'Businesses'},
),
migrations.AlterModelOptions(
name='comments',
options={'verbose_name_plural': 'Comments'},
),
migrations.AlterModelOptions(
name='posts',
options={'verbose_name_plural': 'Posts'},
),
migrations.RemoveField(
model_name='profile',
name='hood',
),
]
| <filename>hoodapp/migrations/0002_auto_20190808_0052.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-07 21:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hoodapp', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='business',
options={'verbose_name_plural': 'Businesses'},
),
migrations.AlterModelOptions(
name='comments',
options={'verbose_name_plural': 'Comments'},
),
migrations.AlterModelOptions(
name='posts',
options={'verbose_name_plural': 'Posts'},
),
migrations.RemoveField(
model_name='profile',
name='hood',
),
]
| en | 0.748387 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2019-08-07 21:52 | 1.468505 | 1 |
src/cirrus/release.py | evansde77/cirrus | 12 | 6630100 | <reponame>evansde77/cirrus<gh_stars>10-100
#!/usr/bin/env python
"""
_release_
Implement git cirrus release command
"""
import os
import sys
import datetime
import itertools
from cirrus.invoke_helpers import local
import pluggage.registry
from argparse import ArgumentParser
from cirrus.configuration import load_configuration
from cirrus.environment import repo_directory
from cirrus.git_tools import build_release_notes
from cirrus.git_tools import has_unstaged_changes, current_branch
from cirrus.git_tools import branch, checkout_and_pull
from cirrus.git_tools import remote_branch_exists
from cirrus.git_tools import commit_files_optional_push
from cirrus.github_tools import GitHubContext, unmerged_releases
from cirrus.utils import update_file, update_version, max_version
from cirrus.logger import get_logger
from cirrus.plugins.jenkins import JenkinsClient
from cirrus.req_utils import bump_package
from cirrus.release_status import release_status
import cirrus.release_utils as rel_utils
LOGGER = get_logger()
def highlander(iterable):
"""check only single True value in iterable"""
# There Can Be Only One!!!
i = iter(iterable)
return any(i) and not any(i)
def parse_version(version):
"""
_parse_version_
Parse semantic major.minor.micro version string
:param version: X.Y.Z format version string
:returns: dictionary containing major, minor, micro versions
as integers
"""
split = version.split('.', 2)
return {
'major': int(split[0]),
'minor': int(split[1]),
'micro': int(split[2]),
}
def bump_version_field(version, field='major'):
"""
parse the version and update the major, minor and micro
version specified by field
Return the updated version string
"""
vers_params = parse_version(version)
vers_params[field] += 1
if field == 'major':
vers_params['minor'] = 0
vers_params['micro'] = 0
elif field == 'minor':
vers_params['micro'] = 0
return "{major}.{minor}.{micro}".format(**vers_params)
def artifact_name(config):
"""
given cirrus config, build the expected
artifact name
"""
artifact_name = "{0}-{1}.tar.gz".format(
config.package_name(),
config.package_version()
)
build_artifact = os.path.join(
os.getcwd(),
'dist',
artifact_name
)
return build_artifact
def egg_artifact_name(config):
"""
given cirrus config, build the expected
artifact name
"""
artifact_name = "{0}-{1}.tar.gz".format(
config.package_name(),
config.package_version()
)
build_artifact = os.path.join(
os.getcwd(),
'dist',
artifact_name
)
return build_artifact
def wheel_artifact_name(config):
"""
given cirrus config, build the expected
artifact name
"""
artifact_name = "{0}-{1}.tar.gz".format(
config.package_name(),
config.package_version()
)
build_artifact = os.path.join(
os.getcwd(),
'dist',
artifact_name
)
return build_artifact
parse_to_list = lambda s: [x.strip() for x in s.split(',') if x.strip()]
def release_branch_name(config):
"""
build expected release branch name from current config
"""
branch_name = "{0}{1}".format(
config.gitflow_release_prefix(),
config.package_version()
)
return branch_name
def convert_bool(value):
"""helper to make sure bools are bools"""
if value in (True, False):
return value
if value is None:
return False
if str(value).lower() in ('true', '1'):
return True
return False
def get_plugin(plugin_name):
"""
_get_plugin_
Get the deploy plugin requested from the factory
"""
factory = pluggage.registry.get_factory(
'upload',
load_modules=['cirrus.plugins.uploaders']
)
return factory(plugin_name)
def release_config(config, opts):
"""
_release_config_
Extract and validate the release config parameters
from the cirrus config for the package
"""
release_config_defaults = {
'wait_on_ci': False,
'wait_on_ci_develop': False,
'wait_on_ci_master': False,
'wait_on_ci_timeout': 600,
'wait_on_ci_interval': 2,
'push_retry_attempts': 1,
'push_retry_cooloff': 0,
'github_context_string': None,
'update_github_context': False,
'develop_github_context_string': None,
'master_github_context_string': None,
'update_develop_github_context': False,
'update_master_github_context': False
}
release_config = {}
if 'release' not in config:
release_config = release_config_defaults
else:
for key, val in release_config_defaults.items():
release_config[key] = config.get_param('release', key, val)
release_config['wait_on_ci'] = convert_bool(release_config['wait_on_ci'])
release_config['wait_on_ci_develop'] = convert_bool(
release_config['wait_on_ci_develop']
)
release_config['wait_on_ci_master'] = convert_bool(
release_config['wait_on_ci_master']
)
if opts.wait_on_ci:
release_config['wait_on_ci'] = True
if opts.github_context_string:
release_config['update_github_context'] = True
release_config['github_context_string'] = opts.github_context_string
if opts.github_develop_context_string:
release_config['update_develop_github_context'] = True
release_config['github_develop_context_string'] = opts.github_develop_context_string
if opts.github_master_context_string:
release_config['update_master_github_context'] = True
release_config['github_master_context_string'] = opts.github_master_context_string
# validate argument types
release_config['wait_on_ci_timeout'] = int(
release_config['wait_on_ci_timeout']
)
release_config['wait_on_ci_interval'] = int(
release_config['wait_on_ci_interval']
)
release_config['update_github_context'] = convert_bool(
release_config['update_github_context']
)
release_config['push_retry_attempts'] = int(
release_config['push_retry_attempts']
)
release_config['push_retry_cooloff'] = int(
release_config['push_retry_cooloff']
)
if release_config['update_github_context']:
# require context string
if release_config['github_context_string'] is None:
msg = "if using update_github_context you must provide a github_context_string"
raise RuntimeError(msg)
release_config['github_context_string'] = parse_to_list(
release_config['github_context_string']
)
if release_config['update_develop_github_context']:
# require context string if release_config['github_develop_context_string'] is None:
if release_config['github_develop_context_string'] is None:
msg = "if using update_develop_github_context you must provide a github_context_string"
raise RuntimeError(msg)
release_config['github_develop_context_string'] = parse_to_list(
release_config['github_develop_context_string']
)
if release_config['update_master_github_context']:
# require context string
if release_config['github_master_context_string'] is None:
msg = "if using update_master_github_context you must provide a github_master_context_string"
raise RuntimeError(msg)
release_config['github_master_context_string'] = parse_to_list(
release_config['github_master_context_string']
)
return release_config
def build_parser(argslist):
"""
_build_parser_
Set up command line parser for the release command
"""
parser = ArgumentParser(
description='git cirrus release command'
)
parser.add_argument('command', nargs='?')
subparsers = parser.add_subparsers(dest='command')
new_command = subparsers.add_parser('new')
new_command.add_argument(
'--micro',
action='store_true',
dest='micro'
)
new_command.add_argument(
'--minor',
action='store_true',
dest='minor'
)
new_command.add_argument(
'--major',
action='store_true',
dest='major'
)
new_command.add_argument(
'--nightly',
action='store_true',
dest='nightly',
default=False
)
new_command.add_argument(
'--skip-existing',
default=False,
action='store_true',
help='Increment past any existing, unmerged release branches'
)
new_command.add_argument(
'--bump',
nargs=2,
action='append',
help='package versions to update in requirements.txt, eg --bump argparse 1.2.1 --bump womp 9.9.9'
)
new_command.add_argument(
'--no-remote',
action='store_true',
default=False,
help="dont push release branch to remote"
)
# borrow --micro/minor/major options from "new" command.
subparsers.add_parser('trigger', parents=[new_command], add_help=False)
new_version_command = subparsers.add_parser('new-version', parents=[new_command], add_help=False)
cleanup_command = subparsers.add_parser(
'cleanup'
)
cleanup_command.add_argument(
'--version', '-v',
help='version to cleanup, defaults to current release',
default=None
)
cleanup_command.add_argument(
'--no-remote',
help='Do not remove remote branch if set',
default=False,
action='store_true'
)
subparsers.add_parser('build')
status_command = subparsers.add_parser('status')
status_command.add_argument(
'--release',
help='check status of the provided release, defaults to current branch',
default=None
)
merge_command = subparsers.add_parser('merge')
merge_command.add_argument(
'--wait-on-ci',
action='store_true',
dest='wait_on_ci',
default=False,
help='Wait for GH CI status to be success before uploading'
)
merge_command.add_argument(
'--context-string',
default=None,
dest='github_context_string',
help='Update the github context string provided when pushed'
)
merge_command.add_argument(
'--develop-context-string',
default=None,
dest='github_develop_context_string',
help='Update the github context string for develop branch provided when pushed'
)
merge_command.add_argument(
'--master-context-string',
default=None,
dest='github_master_context_string',
help='Update the github context string for master branch provided when pushed'
)
merge_command.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
help='Clean up release branch after merging'
)
merge_command.add_argument(
'--skip-master',
action='store_true',
dest='skip_master',
default=False,
help='Skip the master merge and push'
)
merge_command.add_argument(
'--skip-develop',
action='store_true',
dest='skip_develop',
default=False,
help='Skip the develop merge and push'
)
merge_command.add_argument(
'--log-status',
action='store_true',
dest='log_status',
default=False,
help='log all status values for branches during command'
)
merge_command.add_argument(
'--no-remote',
help='Do not remove remote branch if set',
default=False,
action='store_true'
)
upload_command = subparsers.add_parser('upload')
upload_command.add_argument(
'--test',
action='store_true',
dest='test',
help='test only, do not actually push or upload'
)
upload_command.add_argument(
'--plugin',
dest='plugin',
default='pypi',
help='Uploader plugin to use'
)
upload_command.add_argument(
'--pypi-url', '-r',
action='store',
dest='pypi_url',
help='upload to specified pypi url'
)
upload_command.add_argument(
'--pypi-sudo',
action='store_true',
dest='pypi_sudo',
help='use sudo to upload build artifact to pypi'
)
upload_command.add_argument(
'--no-pypi-sudo',
action='store_false',
dest='pypi_sudo',
help='do not use sudo to upload build artifact to pypi'
)
upload_command.set_defaults(pypi_sudo=True)
opts = parser.parse_args(argslist)
return opts
def make_new_version(opts):
LOGGER.info("Updating package version...")
if not highlander([opts.major, opts.minor, opts.micro]):
msg = "Can only specify one of --major, --minor or --micro"
LOGGER.error(msg)
raise RuntimeError(msg)
fields = ['major', 'minor', 'micro']
mask = [opts.major, opts.minor, opts.micro]
field = [x for x in itertools.compress(fields, mask)][0]
config = load_configuration()
current_version = config.package_version()
# need to be on the latest develop
repo_dir = repo_directory()
curr_branch = current_branch(repo_dir)
# make sure repo is clean
if has_unstaged_changes(repo_dir):
msg = (
"Error: Unstaged changes are present on the branch {}"
"Please commit them or clean up before proceeding"
).format(curr_branch)
LOGGER.error(msg)
raise RuntimeError(msg)
#
# compute next version
#
if opts.skip_existing:
# skip any existing unmerged branches
unmerged = unmerged_releases(repo_dir, version_only=True)
if unmerged:
LOGGER.info(
(
"Skipping Existing Versions found "
"unmerged_releases: {}"
).format(
' '.join(unmerged)
)
)
unmerged.append(current_version)
current_version = max_version(*unmerged)
LOGGER.info(
"selected current version as {}".format(current_version)
)
new_version = bump_version_field(current_version, field)
msg = "Bumping version from {prev} to {new} on branch {branch}".format(
prev=current_version,
new=new_version,
branch=curr_branch
)
LOGGER.info(msg)
# update cirrus conf
config.update_package_version(new_version)
changes = ['cirrus.conf']
if opts.bump:
reqs_file = os.path.join(repo_dir, 'requirements.txt')
for pkg, version in opts.bump:
LOGGER.info("Bumping dependency {} to {}".format(pkg, version))
bump_package(reqs_file, pkg, version)
changes.append(reqs_file)
# update __version__ or equivalent
version_file, version_attr = config.version_file()
if version_file is not None:
LOGGER.info('Updating {0} attribute in {1}'.format(version_file, version_attr))
update_version(version_file, new_version, version_attr)
changes.append(version_file)
# update files changed
msg = "cirrus release: version bumped for {0}".format(curr_branch)
LOGGER.info('Committing files: {0}'.format(','.join(changes)))
LOGGER.info(msg)
commit_files_optional_push(repo_dir, msg, not opts.no_remote, *changes)
def new_release(opts):
"""
_new_release_
- Create a new release branch in the local repo
- Edit the conf to bump the version
- Edit the history file with release notes
"""
LOGGER.info("Creating new release...")
config = load_configuration()
current_version = config.package_version()
# need to be on the latest develop
repo_dir = repo_directory()
if opts.nightly:
msg = "creating new nightly release..."
new_version = rel_utils.new_nightly()
field = 'nightly'
else:
if not highlander([opts.major, opts.minor, opts.micro]):
msg = "Can only specify one of --major, --minor or --micro"
LOGGER.error(msg)
raise RuntimeError(msg)
fields = ['major', 'minor', 'micro']
mask = [opts.major, opts.minor, opts.micro]
field = [x for x in itertools.compress(fields, mask)][0]
curr = current_version
if opts.skip_existing:
# skip any existing unmerged branches
unmerged = unmerged_releases(repo_dir, version_only=True)
if unmerged:
LOGGER.info(
(
"Skipping Existing Versions found "
"unmerged_releases: {}"
).format(
' '.join(unmerged)
)
)
unmerged.append(current_version)
curr = max_version(*unmerged)
LOGGER.info(
"selected current version as {}".format(curr)
)
new_version = bump_version_field(curr, field)
# release branch
branch_name = "{0}{1}".format(
config.gitflow_release_prefix(),
new_version
)
LOGGER.info('release branch is {0}'.format(branch_name))
# make sure the branch doesnt already exist on remote
if remote_branch_exists(repo_dir, branch_name):
msg = (
"Error: branch {branch_name} already exists on the remote repo "
"Please clean up that branch before proceeding\n"
"git branch -d {branch_name}\n"
"git push origin --delete {branch_name}\n"
).format(branch_name=branch_name)
LOGGER.error(msg)
raise RuntimeError(msg)
# make sure repo is clean
if has_unstaged_changes(repo_dir):
msg = (
"Error: Unstaged changes are present on the branch "
"Please commit them or clean up before proceeding"
)
LOGGER.error(msg)
raise RuntimeError(msg)
main_branch = config.gitflow_branch_name()
checkout_and_pull(repo_dir, main_branch, pull=not opts.no_remote)
# create release branch
branch(repo_dir, branch_name, main_branch)
# update cirrus conf
config.update_package_version(new_version)
changes = ['cirrus.conf']
if opts.bump:
reqs_file = os.path.join(repo_dir, 'requirements.txt')
for pkg, version in opts.bump:
LOGGER.info("Bumping dependency {} to {}".format(pkg, version))
bump_package(reqs_file, pkg, version)
changes.append(reqs_file)
# update release notes file
relnotes_file, relnotes_sentinel = config.release_notes()
if (relnotes_file is not None) and (relnotes_sentinel is not None):
LOGGER.info('Updating release notes in {0}'.format(relnotes_file))
relnotes = "Release: {0} Created: {1}\n".format(
new_version,
datetime.datetime.utcnow().isoformat()
)
relnotes += build_release_notes(
repo_dir,
current_version,
config.release_notes_format()
)
update_file(relnotes_file, relnotes_sentinel, relnotes)
changes.append(relnotes_file)
# update __version__ or equivalent
version_file, version_attr = config.version_file()
if version_file is not None:
LOGGER.info('Updating {0} attribute in {1}'.format(version_file, version_attr))
update_version(version_file, new_version, version_attr)
changes.append(version_file)
# update files changed
msg = "cirrus release: new release created for {0}".format(branch_name)
LOGGER.info('Committing files: {0}'.format(','.join(changes)))
LOGGER.info(msg)
commit_files_optional_push(repo_dir, msg, not opts.no_remote, *changes)
return (new_version, field)
def trigger_release(opts):
"""
_trigger_release_
Alias for "git cirrus release new --micro/minor/major.
- Run the "release new" command
- Capture the new version string
- Pass new version number to external build server
Requires the following sections and values in cirrus.conf:
[build-server]
name = jenkins
[jenkins]
url = http://localhost:8080
job = default
"""
config = load_configuration()
try:
build_server = config['build-server']['name']
build_server_config = config[build_server]
except KeyError:
msg = (
'[build-server] section is incomplete or missing from cirrus.conf. '
'Please see below for an example.\n'
'\n [build-server]'
'\n name = jenkins'
'\n [jenkins]'
'\n url = http://localhost:8080'
'\n job = default'
)
raise RuntimeError(msg)
new_version, release_level = new_release(opts)
if build_server == 'jenkins':
_trigger_jenkins_release(build_server_config,
new_version,
release_level)
def _trigger_jenkins_release(config, new_version, level):
"""
_trigger_jenkins_release_
Performs jenkins specific steps for launching a build job
"""
client = JenkinsClient(config['url'])
build_params = {
'LEVEL': level,
'VERSION': new_version,
}
response = client.start_job(config['job'], build_params)
if response.status_code != 201:
LOGGER.error(response.text)
raise RuntimeError('Jenkins HTTP API returned code {}'.format(response.status_code))
def upload_release(opts):
"""
_upload_release_
"""
LOGGER.info("Uploading release...")
config = load_configuration()
build_artifact = artifact_name(config)
LOGGER.info("Uploading artifact: {0}".format(build_artifact))
if not os.path.exists(build_artifact):
msg = (
"Expected build artifact: {0} Not Found, upload aborted\n"
"Did you run git cirrus release build?"
).format(build_artifact)
LOGGER.error(msg)
raise RuntimeError(msg)
# merge in release branches and tag, push to remote
tag = config.package_version()
LOGGER.info("Loading plugin {}".format(opts.plugin))
plugin = get_plugin(opts.plugin)
if opts.test:
LOGGER.info("Uploading {} to pypi disabled by test or option...".format(tag))
return
plugin.upload(opts, build_artifact)
return
def cleanup_release(opts):
"""
_cleanup_release_
Remove local and remote release branches if they exist
"""
config = load_configuration()
repo_dir = os.getcwd()
pfix = config.gitflow_release_prefix()
branch_name = release_branch_name(config)
if opts.version is not None:
if not opts.version.startswith(pfix):
branch_name = "{0}{1}".format(
pfix,
opts.version
)
else:
branch_name = opts.version
LOGGER.info("Cleaning release branches for {}".format(branch_name))
with GitHubContext(repo_dir) as ghc:
ghc.delete_branch(branch_name, not opts.no_remote)
def merge_release(opts):
"""
_merge_release_
Merge a release branch git flow style into master and develop
branches (or those configured for this package) and tag
master.
"""
config = load_configuration()
rel_conf = release_config(config, opts)
repo_dir = os.getcwd()
tag = config.package_version()
master = config.gitflow_master_name()
develop = config.gitflow_branch_name()
with GitHubContext(repo_dir) as ghc:
release_branch = ghc.active_branch_name
expected_branch = release_branch_name(config)
if release_branch != expected_branch:
msg = (
u"Not on the expected release branch according "
u"to cirrus.conf\n Expected:{0} but on {1}"
).format(expected_branch, release_branch)
LOGGER.error(msg)
raise RuntimeError(msg)
# merge release branch into master
LOGGER.info(u"Tagging and pushing {0}".format(tag))
if opts.skip_master:
LOGGER.info(u'Skipping merging to {}'.format(master))
if opts.skip_develop:
LOGGER.info(u'Skipping merging to {}'.format(develop))
if opts.log_status:
ghc.log_branch_status(master)
if not opts.skip_master:
sha = ghc.repo.head.ref.commit.hexsha
if rel_conf['wait_on_ci']:
#
# wait on release branch CI success
#
LOGGER.info(u"Waiting on CI build for {0}".format(release_branch))
ghc.wait_on_gh_status(
sha,
timeout=rel_conf['wait_on_ci_timeout'],
interval=rel_conf['wait_on_ci_interval']
)
LOGGER.info(u"Merging {} into {}".format(release_branch, master))
ghc.pull_branch(master, remote=not opts.no_remote)
ghc.merge_branch(release_branch)
sha = ghc.repo.head.ref.commit.hexsha
if rel_conf['wait_on_ci_master']:
#
# wait on release branch CI success
#
LOGGER.info(u"Waiting on CI build for {0}".format(master))
ghc.wait_on_gh_status(
sha,
timeout=rel_conf['wait_on_ci_timeout'],
interval=rel_conf['wait_on_ci_interval']
)
if rel_conf['update_github_context']:
for ctx in rel_conf['github_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if rel_conf['update_master_github_context']:
for ctx in rel_conf['github_master_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if not opts.no_remote:
ghc.push_branch_with_retry(
attempts=rel_conf['push_retry_attempts'],
cooloff=rel_conf['push_retry_cooloff']
)
LOGGER.info(u"Tagging {} as {}".format(master, tag))
ghc.tag_release(
tag,
master,
push=not opts.no_remote,
attempts=rel_conf['push_retry_attempts'],
cooloff=rel_conf['push_retry_cooloff']
)
LOGGER.info(u"Merging {} into {}".format(release_branch, develop))
if opts.log_status:
ghc.log_branch_status(develop)
if not opts.skip_develop:
ghc.pull_branch(develop, remote=not opts.no_remote)
ghc.merge_branch(release_branch)
if rel_utils.is_nightly(tag):
rel_utils.remove_nightly(ghc)
sha = ghc.repo.head.ref.commit.hexsha
if rel_conf['wait_on_ci_develop']:
#
# wait on release branch CI success
#
LOGGER.info(u"Waiting on CI build for {0}".format(develop))
ghc.wait_on_gh_status(
sha,
timeout=rel_conf['wait_on_ci_timeout'],
interval=rel_conf['wait_on_ci_interval']
)
if rel_conf['update_github_context']:
for ctx in rel_conf['github_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if rel_conf['update_develop_github_context']:
for ctx in rel_conf['github_develop_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if not opts.no_remote:
ghc.push_branch_with_retry(
attempts=rel_conf['push_retry_attempts'],
cooloff=rel_conf['push_retry_cooloff']
)
if opts.cleanup:
ghc.delete_branch(release_branch, remote=not opts.no_remote)
def show_release_status(opts):
"""check release status"""
release = opts.release
if release is None:
release = current_branch(repo_directory())
result = release_status(release)
if not result:
# unmerged/tagged release => exit as error status
sys.exit(1)
def build_release(opts):
"""
_build_release_
run python setup.py sdist to create the release artifact
"""
LOGGER.info("Building release...")
config = load_configuration()
local('python setup.py sdist')
build_artifact = artifact_name(config)
if not os.path.exists(build_artifact):
msg = "Expected build artifact: {0} Not Found".format(build_artifact)
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info("Release artifact created: {0}".format(build_artifact))
return build_artifact
def main():
opts = build_parser(sys.argv)
if opts.command == 'new':
new_release(opts)
if opts.command == 'new-version':
make_new_version(opts)
if opts.command == 'status':
show_release_status(opts)
if opts.command == 'trigger':
trigger_release(opts)
if opts.command == 'merge':
merge_release(opts)
if opts.command == 'upload':
upload_release(opts)
if opts.command == 'build':
build_release(opts)
if opts.command == 'cleanup':
cleanup_release(opts)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""
_release_
Implement git cirrus release command
"""
import os
import sys
import datetime
import itertools
from cirrus.invoke_helpers import local
import pluggage.registry
from argparse import ArgumentParser
from cirrus.configuration import load_configuration
from cirrus.environment import repo_directory
from cirrus.git_tools import build_release_notes
from cirrus.git_tools import has_unstaged_changes, current_branch
from cirrus.git_tools import branch, checkout_and_pull
from cirrus.git_tools import remote_branch_exists
from cirrus.git_tools import commit_files_optional_push
from cirrus.github_tools import GitHubContext, unmerged_releases
from cirrus.utils import update_file, update_version, max_version
from cirrus.logger import get_logger
from cirrus.plugins.jenkins import JenkinsClient
from cirrus.req_utils import bump_package
from cirrus.release_status import release_status
import cirrus.release_utils as rel_utils
LOGGER = get_logger()
def highlander(iterable):
"""check only single True value in iterable"""
# There Can Be Only One!!!
i = iter(iterable)
return any(i) and not any(i)
def parse_version(version):
"""
_parse_version_
Parse semantic major.minor.micro version string
:param version: X.Y.Z format version string
:returns: dictionary containing major, minor, micro versions
as integers
"""
split = version.split('.', 2)
return {
'major': int(split[0]),
'minor': int(split[1]),
'micro': int(split[2]),
}
def bump_version_field(version, field='major'):
"""
parse the version and update the major, minor and micro
version specified by field
Return the updated version string
"""
vers_params = parse_version(version)
vers_params[field] += 1
if field == 'major':
vers_params['minor'] = 0
vers_params['micro'] = 0
elif field == 'minor':
vers_params['micro'] = 0
return "{major}.{minor}.{micro}".format(**vers_params)
def artifact_name(config):
"""
given cirrus config, build the expected
artifact name
"""
artifact_name = "{0}-{1}.tar.gz".format(
config.package_name(),
config.package_version()
)
build_artifact = os.path.join(
os.getcwd(),
'dist',
artifact_name
)
return build_artifact
def egg_artifact_name(config):
"""
given cirrus config, build the expected
artifact name
"""
artifact_name = "{0}-{1}.tar.gz".format(
config.package_name(),
config.package_version()
)
build_artifact = os.path.join(
os.getcwd(),
'dist',
artifact_name
)
return build_artifact
def wheel_artifact_name(config):
"""
given cirrus config, build the expected
artifact name
"""
artifact_name = "{0}-{1}.tar.gz".format(
config.package_name(),
config.package_version()
)
build_artifact = os.path.join(
os.getcwd(),
'dist',
artifact_name
)
return build_artifact
parse_to_list = lambda s: [x.strip() for x in s.split(',') if x.strip()]
def release_branch_name(config):
"""
build expected release branch name from current config
"""
branch_name = "{0}{1}".format(
config.gitflow_release_prefix(),
config.package_version()
)
return branch_name
def convert_bool(value):
"""helper to make sure bools are bools"""
if value in (True, False):
return value
if value is None:
return False
if str(value).lower() in ('true', '1'):
return True
return False
def get_plugin(plugin_name):
"""
_get_plugin_
Get the deploy plugin requested from the factory
"""
factory = pluggage.registry.get_factory(
'upload',
load_modules=['cirrus.plugins.uploaders']
)
return factory(plugin_name)
def release_config(config, opts):
"""
_release_config_
Extract and validate the release config parameters
from the cirrus config for the package
"""
release_config_defaults = {
'wait_on_ci': False,
'wait_on_ci_develop': False,
'wait_on_ci_master': False,
'wait_on_ci_timeout': 600,
'wait_on_ci_interval': 2,
'push_retry_attempts': 1,
'push_retry_cooloff': 0,
'github_context_string': None,
'update_github_context': False,
'develop_github_context_string': None,
'master_github_context_string': None,
'update_develop_github_context': False,
'update_master_github_context': False
}
release_config = {}
if 'release' not in config:
release_config = release_config_defaults
else:
for key, val in release_config_defaults.items():
release_config[key] = config.get_param('release', key, val)
release_config['wait_on_ci'] = convert_bool(release_config['wait_on_ci'])
release_config['wait_on_ci_develop'] = convert_bool(
release_config['wait_on_ci_develop']
)
release_config['wait_on_ci_master'] = convert_bool(
release_config['wait_on_ci_master']
)
if opts.wait_on_ci:
release_config['wait_on_ci'] = True
if opts.github_context_string:
release_config['update_github_context'] = True
release_config['github_context_string'] = opts.github_context_string
if opts.github_develop_context_string:
release_config['update_develop_github_context'] = True
release_config['github_develop_context_string'] = opts.github_develop_context_string
if opts.github_master_context_string:
release_config['update_master_github_context'] = True
release_config['github_master_context_string'] = opts.github_master_context_string
# validate argument types
release_config['wait_on_ci_timeout'] = int(
release_config['wait_on_ci_timeout']
)
release_config['wait_on_ci_interval'] = int(
release_config['wait_on_ci_interval']
)
release_config['update_github_context'] = convert_bool(
release_config['update_github_context']
)
release_config['push_retry_attempts'] = int(
release_config['push_retry_attempts']
)
release_config['push_retry_cooloff'] = int(
release_config['push_retry_cooloff']
)
if release_config['update_github_context']:
# require context string
if release_config['github_context_string'] is None:
msg = "if using update_github_context you must provide a github_context_string"
raise RuntimeError(msg)
release_config['github_context_string'] = parse_to_list(
release_config['github_context_string']
)
if release_config['update_develop_github_context']:
        # require context string
        if release_config['github_develop_context_string'] is None:
            msg = "if using update_develop_github_context you must provide a github_develop_context_string"
raise RuntimeError(msg)
release_config['github_develop_context_string'] = parse_to_list(
release_config['github_develop_context_string']
)
if release_config['update_master_github_context']:
# require context string
if release_config['github_master_context_string'] is None:
msg = "if using update_master_github_context you must provide a github_master_context_string"
raise RuntimeError(msg)
release_config['github_master_context_string'] = parse_to_list(
release_config['github_master_context_string']
)
return release_config
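# Illustrative cirrus.conf snippet exercising the defaults above (key names
# taken from release_config_defaults; all keys are optional):
#
#   [release]
#   wait_on_ci = True
#   wait_on_ci_timeout = 600
#   push_retry_attempts = 3
#   github_context_string = ci/build, ci/test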
def build_parser(argslist):
"""
_build_parser_
Set up command line parser for the release command
"""
parser = ArgumentParser(
description='git cirrus release command'
)
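    # NOTE: main() passes sys.argv straight through, so the throw-away
    # positional below swallows the leading command token before the
    # subparsers take over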
parser.add_argument('command', nargs='?')
subparsers = parser.add_subparsers(dest='command')
new_command = subparsers.add_parser('new')
new_command.add_argument(
'--micro',
action='store_true',
dest='micro'
)
new_command.add_argument(
'--minor',
action='store_true',
dest='minor'
)
new_command.add_argument(
'--major',
action='store_true',
dest='major'
)
new_command.add_argument(
'--nightly',
action='store_true',
dest='nightly',
default=False
)
new_command.add_argument(
'--skip-existing',
default=False,
action='store_true',
help='Increment past any existing, unmerged release branches'
)
new_command.add_argument(
'--bump',
nargs=2,
action='append',
help='package versions to update in requirements.txt, eg --bump argparse 1.2.1 --bump womp 9.9.9'
)
new_command.add_argument(
'--no-remote',
action='store_true',
default=False,
help="dont push release branch to remote"
)
# borrow --micro/minor/major options from "new" command.
subparsers.add_parser('trigger', parents=[new_command], add_help=False)
new_version_command = subparsers.add_parser('new-version', parents=[new_command], add_help=False)
cleanup_command = subparsers.add_parser(
'cleanup'
)
cleanup_command.add_argument(
'--version', '-v',
help='version to cleanup, defaults to current release',
default=None
)
cleanup_command.add_argument(
'--no-remote',
help='Do not remove remote branch if set',
default=False,
action='store_true'
)
subparsers.add_parser('build')
status_command = subparsers.add_parser('status')
status_command.add_argument(
'--release',
help='check status of the provided release, defaults to current branch',
default=None
)
merge_command = subparsers.add_parser('merge')
merge_command.add_argument(
'--wait-on-ci',
action='store_true',
dest='wait_on_ci',
default=False,
help='Wait for GH CI status to be success before uploading'
)
merge_command.add_argument(
'--context-string',
default=None,
dest='github_context_string',
help='Update the github context string provided when pushed'
)
merge_command.add_argument(
'--develop-context-string',
default=None,
dest='github_develop_context_string',
help='Update the github context string for develop branch provided when pushed'
)
merge_command.add_argument(
'--master-context-string',
default=None,
dest='github_master_context_string',
help='Update the github context string for master branch provided when pushed'
)
merge_command.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
help='Clean up release branch after merging'
)
merge_command.add_argument(
'--skip-master',
action='store_true',
dest='skip_master',
default=False,
help='Skip the master merge and push'
)
merge_command.add_argument(
'--skip-develop',
action='store_true',
dest='skip_develop',
default=False,
help='Skip the develop merge and push'
)
merge_command.add_argument(
'--log-status',
action='store_true',
dest='log_status',
default=False,
help='log all status values for branches during command'
)
merge_command.add_argument(
'--no-remote',
help='Do not remove remote branch if set',
default=False,
action='store_true'
)
upload_command = subparsers.add_parser('upload')
upload_command.add_argument(
'--test',
action='store_true',
dest='test',
help='test only, do not actually push or upload'
)
upload_command.add_argument(
'--plugin',
dest='plugin',
default='pypi',
help='Uploader plugin to use'
)
upload_command.add_argument(
'--pypi-url', '-r',
action='store',
dest='pypi_url',
help='upload to specified pypi url'
)
upload_command.add_argument(
'--pypi-sudo',
action='store_true',
dest='pypi_sudo',
help='use sudo to upload build artifact to pypi'
)
upload_command.add_argument(
'--no-pypi-sudo',
action='store_false',
dest='pypi_sudo',
help='do not use sudo to upload build artifact to pypi'
)
upload_command.set_defaults(pypi_sudo=True)
opts = parser.parse_args(argslist)
return opts
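# Illustrative invocations accepted by the parser above:
#   git cirrus release new --minor --bump requests 2.20.0
#   git cirrus release merge --cleanup --wait-on-ci
#   git cirrus release upload --plugin pypi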
def make_new_version(opts):
LOGGER.info("Updating package version...")
if not highlander([opts.major, opts.minor, opts.micro]):
msg = "Can only specify one of --major, --minor or --micro"
LOGGER.error(msg)
raise RuntimeError(msg)
fields = ['major', 'minor', 'micro']
mask = [opts.major, opts.minor, opts.micro]
field = [x for x in itertools.compress(fields, mask)][0]
config = load_configuration()
current_version = config.package_version()
# need to be on the latest develop
repo_dir = repo_directory()
curr_branch = current_branch(repo_dir)
# make sure repo is clean
if has_unstaged_changes(repo_dir):
msg = (
"Error: Unstaged changes are present on the branch {}"
"Please commit them or clean up before proceeding"
).format(curr_branch)
LOGGER.error(msg)
raise RuntimeError(msg)
#
# compute next version
#
if opts.skip_existing:
# skip any existing unmerged branches
unmerged = unmerged_releases(repo_dir, version_only=True)
if unmerged:
LOGGER.info(
(
"Skipping Existing Versions found "
"unmerged_releases: {}"
).format(
' '.join(unmerged)
)
)
unmerged.append(current_version)
current_version = max_version(*unmerged)
LOGGER.info(
"selected current version as {}".format(current_version)
)
new_version = bump_version_field(current_version, field)
msg = "Bumping version from {prev} to {new} on branch {branch}".format(
prev=current_version,
new=new_version,
branch=curr_branch
)
LOGGER.info(msg)
# update cirrus conf
config.update_package_version(new_version)
changes = ['cirrus.conf']
if opts.bump:
reqs_file = os.path.join(repo_dir, 'requirements.txt')
for pkg, version in opts.bump:
LOGGER.info("Bumping dependency {} to {}".format(pkg, version))
bump_package(reqs_file, pkg, version)
changes.append(reqs_file)
# update __version__ or equivalent
version_file, version_attr = config.version_file()
if version_file is not None:
        LOGGER.info('Updating {0} attribute in {1}'.format(version_attr, version_file))
update_version(version_file, new_version, version_attr)
changes.append(version_file)
# update files changed
msg = "cirrus release: version bumped for {0}".format(curr_branch)
LOGGER.info('Committing files: {0}'.format(','.join(changes)))
LOGGER.info(msg)
commit_files_optional_push(repo_dir, msg, not opts.no_remote, *changes)
def new_release(opts):
"""
_new_release_
- Create a new release branch in the local repo
- Edit the conf to bump the version
- Edit the history file with release notes
"""
LOGGER.info("Creating new release...")
config = load_configuration()
current_version = config.package_version()
# need to be on the latest develop
repo_dir = repo_directory()
if opts.nightly:
msg = "creating new nightly release..."
new_version = rel_utils.new_nightly()
field = 'nightly'
else:
if not highlander([opts.major, opts.minor, opts.micro]):
msg = "Can only specify one of --major, --minor or --micro"
LOGGER.error(msg)
raise RuntimeError(msg)
fields = ['major', 'minor', 'micro']
mask = [opts.major, opts.minor, opts.micro]
field = [x for x in itertools.compress(fields, mask)][0]
curr = current_version
if opts.skip_existing:
# skip any existing unmerged branches
unmerged = unmerged_releases(repo_dir, version_only=True)
if unmerged:
LOGGER.info(
(
"Skipping Existing Versions found "
"unmerged_releases: {}"
).format(
' '.join(unmerged)
)
)
unmerged.append(current_version)
curr = max_version(*unmerged)
LOGGER.info(
"selected current version as {}".format(curr)
)
new_version = bump_version_field(curr, field)
# release branch
branch_name = "{0}{1}".format(
config.gitflow_release_prefix(),
new_version
)
LOGGER.info('release branch is {0}'.format(branch_name))
# make sure the branch doesnt already exist on remote
if remote_branch_exists(repo_dir, branch_name):
msg = (
"Error: branch {branch_name} already exists on the remote repo "
"Please clean up that branch before proceeding\n"
"git branch -d {branch_name}\n"
"git push origin --delete {branch_name}\n"
).format(branch_name=branch_name)
LOGGER.error(msg)
raise RuntimeError(msg)
# make sure repo is clean
if has_unstaged_changes(repo_dir):
msg = (
"Error: Unstaged changes are present on the branch "
"Please commit them or clean up before proceeding"
)
LOGGER.error(msg)
raise RuntimeError(msg)
main_branch = config.gitflow_branch_name()
checkout_and_pull(repo_dir, main_branch, pull=not opts.no_remote)
# create release branch
branch(repo_dir, branch_name, main_branch)
# update cirrus conf
config.update_package_version(new_version)
changes = ['cirrus.conf']
if opts.bump:
reqs_file = os.path.join(repo_dir, 'requirements.txt')
for pkg, version in opts.bump:
LOGGER.info("Bumping dependency {} to {}".format(pkg, version))
bump_package(reqs_file, pkg, version)
changes.append(reqs_file)
# update release notes file
relnotes_file, relnotes_sentinel = config.release_notes()
if (relnotes_file is not None) and (relnotes_sentinel is not None):
LOGGER.info('Updating release notes in {0}'.format(relnotes_file))
relnotes = "Release: {0} Created: {1}\n".format(
new_version,
datetime.datetime.utcnow().isoformat()
)
relnotes += build_release_notes(
repo_dir,
current_version,
config.release_notes_format()
)
update_file(relnotes_file, relnotes_sentinel, relnotes)
changes.append(relnotes_file)
# update __version__ or equivalent
version_file, version_attr = config.version_file()
if version_file is not None:
        LOGGER.info('Updating {0} attribute in {1}'.format(version_attr, version_file))
update_version(version_file, new_version, version_attr)
changes.append(version_file)
# update files changed
msg = "cirrus release: new release created for {0}".format(branch_name)
LOGGER.info('Committing files: {0}'.format(','.join(changes)))
LOGGER.info(msg)
commit_files_optional_push(repo_dir, msg, not opts.no_remote, *changes)
return (new_version, field)
def trigger_release(opts):
"""
_trigger_release_
Alias for "git cirrus release new --micro/minor/major.
- Run the "release new" command
- Capture the new version string
- Pass new version number to external build server
Requires the following sections and values in cirrus.conf:
[build-server]
name = jenkins
[jenkins]
url = http://localhost:8080
job = default
"""
config = load_configuration()
try:
build_server = config['build-server']['name']
build_server_config = config[build_server]
except KeyError:
msg = (
'[build-server] section is incomplete or missing from cirrus.conf. '
'Please see below for an example.\n'
'\n [build-server]'
'\n name = jenkins'
'\n [jenkins]'
'\n url = http://localhost:8080'
'\n job = default'
)
raise RuntimeError(msg)
new_version, release_level = new_release(opts)
if build_server == 'jenkins':
_trigger_jenkins_release(build_server_config,
new_version,
release_level)
def _trigger_jenkins_release(config, new_version, level):
"""
_trigger_jenkins_release_
Performs jenkins specific steps for launching a build job
"""
client = JenkinsClient(config['url'])
build_params = {
'LEVEL': level,
'VERSION': new_version,
}
response = client.start_job(config['job'], build_params)
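    # Jenkins answers a successful queue request with HTTP 201 (Created)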
if response.status_code != 201:
LOGGER.error(response.text)
raise RuntimeError('Jenkins HTTP API returned code {}'.format(response.status_code))
def upload_release(opts):
"""
_upload_release_
"""
LOGGER.info("Uploading release...")
config = load_configuration()
build_artifact = artifact_name(config)
LOGGER.info("Uploading artifact: {0}".format(build_artifact))
if not os.path.exists(build_artifact):
msg = (
"Expected build artifact: {0} Not Found, upload aborted\n"
"Did you run git cirrus release build?"
).format(build_artifact)
LOGGER.error(msg)
raise RuntimeError(msg)
# merge in release branches and tag, push to remote
tag = config.package_version()
LOGGER.info("Loading plugin {}".format(opts.plugin))
plugin = get_plugin(opts.plugin)
if opts.test:
LOGGER.info("Uploading {} to pypi disabled by test or option...".format(tag))
return
plugin.upload(opts, build_artifact)
return
def cleanup_release(opts):
"""
_cleanup_release_
Remove local and remote release branches if they exist
"""
config = load_configuration()
repo_dir = os.getcwd()
pfix = config.gitflow_release_prefix()
branch_name = release_branch_name(config)
if opts.version is not None:
if not opts.version.startswith(pfix):
branch_name = "{0}{1}".format(
pfix,
opts.version
)
else:
branch_name = opts.version
LOGGER.info("Cleaning release branches for {}".format(branch_name))
with GitHubContext(repo_dir) as ghc:
ghc.delete_branch(branch_name, not opts.no_remote)
def merge_release(opts):
"""
_merge_release_
Merge a release branch git flow style into master and develop
branches (or those configured for this package) and tag
master.
"""
config = load_configuration()
rel_conf = release_config(config, opts)
repo_dir = os.getcwd()
tag = config.package_version()
master = config.gitflow_master_name()
develop = config.gitflow_branch_name()
with GitHubContext(repo_dir) as ghc:
release_branch = ghc.active_branch_name
expected_branch = release_branch_name(config)
if release_branch != expected_branch:
msg = (
u"Not on the expected release branch according "
u"to cirrus.conf\n Expected:{0} but on {1}"
).format(expected_branch, release_branch)
LOGGER.error(msg)
raise RuntimeError(msg)
# merge release branch into master
LOGGER.info(u"Tagging and pushing {0}".format(tag))
if opts.skip_master:
LOGGER.info(u'Skipping merging to {}'.format(master))
if opts.skip_develop:
LOGGER.info(u'Skipping merging to {}'.format(develop))
if opts.log_status:
ghc.log_branch_status(master)
if not opts.skip_master:
sha = ghc.repo.head.ref.commit.hexsha
if rel_conf['wait_on_ci']:
#
# wait on release branch CI success
#
LOGGER.info(u"Waiting on CI build for {0}".format(release_branch))
ghc.wait_on_gh_status(
sha,
timeout=rel_conf['wait_on_ci_timeout'],
interval=rel_conf['wait_on_ci_interval']
)
LOGGER.info(u"Merging {} into {}".format(release_branch, master))
ghc.pull_branch(master, remote=not opts.no_remote)
ghc.merge_branch(release_branch)
sha = ghc.repo.head.ref.commit.hexsha
if rel_conf['wait_on_ci_master']:
#
                # wait on master branch CI success
#
LOGGER.info(u"Waiting on CI build for {0}".format(master))
ghc.wait_on_gh_status(
sha,
timeout=rel_conf['wait_on_ci_timeout'],
interval=rel_conf['wait_on_ci_interval']
)
if rel_conf['update_github_context']:
for ctx in rel_conf['github_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if rel_conf['update_master_github_context']:
for ctx in rel_conf['github_master_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if not opts.no_remote:
ghc.push_branch_with_retry(
attempts=rel_conf['push_retry_attempts'],
cooloff=rel_conf['push_retry_cooloff']
)
LOGGER.info(u"Tagging {} as {}".format(master, tag))
ghc.tag_release(
tag,
master,
push=not opts.no_remote,
attempts=rel_conf['push_retry_attempts'],
cooloff=rel_conf['push_retry_cooloff']
)
LOGGER.info(u"Merging {} into {}".format(release_branch, develop))
if opts.log_status:
ghc.log_branch_status(develop)
if not opts.skip_develop:
ghc.pull_branch(develop, remote=not opts.no_remote)
ghc.merge_branch(release_branch)
if rel_utils.is_nightly(tag):
rel_utils.remove_nightly(ghc)
sha = ghc.repo.head.ref.commit.hexsha
if rel_conf['wait_on_ci_develop']:
#
                # wait on develop branch CI success
#
LOGGER.info(u"Waiting on CI build for {0}".format(develop))
ghc.wait_on_gh_status(
sha,
timeout=rel_conf['wait_on_ci_timeout'],
interval=rel_conf['wait_on_ci_interval']
)
if rel_conf['update_github_context']:
for ctx in rel_conf['github_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if rel_conf['update_develop_github_context']:
for ctx in rel_conf['github_develop_context_string']:
LOGGER.info(u"Setting {} for {}".format(
ctx,
sha)
)
ghc.set_branch_state(
'success',
ctx,
branch=sha
)
if not opts.no_remote:
ghc.push_branch_with_retry(
attempts=rel_conf['push_retry_attempts'],
cooloff=rel_conf['push_retry_cooloff']
)
if opts.cleanup:
ghc.delete_branch(release_branch, remote=not opts.no_remote)
def show_release_status(opts):
"""check release status"""
release = opts.release
if release is None:
release = current_branch(repo_directory())
result = release_status(release)
if not result:
# unmerged/tagged release => exit as error status
sys.exit(1)
def build_release(opts):
"""
_build_release_
run python setup.py sdist to create the release artifact
"""
LOGGER.info("Building release...")
config = load_configuration()
local('python setup.py sdist')
build_artifact = artifact_name(config)
if not os.path.exists(build_artifact):
msg = "Expected build artifact: {0} Not Found".format(build_artifact)
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info("Release artifact created: {0}".format(build_artifact))
return build_artifact
def main():
opts = build_parser(sys.argv)
if opts.command == 'new':
new_release(opts)
if opts.command == 'new-version':
make_new_version(opts)
if opts.command == 'status':
show_release_status(opts)
if opts.command == 'trigger':
trigger_release(opts)
if opts.command == 'merge':
merge_release(opts)
if opts.command == 'upload':
upload_release(opts)
if opts.command == 'build':
build_release(opts)
if opts.command == 'cleanup':
cleanup_release(opts)
if __name__ == '__main__':
main() | en | 0.722685 | #!/usr/bin/env python _release_ Implement git cirrus release command check only single True value in iterable # There Can Be Only One!!! _parse_version_ Parse semantic major.minor.micro version string :param version: X.Y.Z format version string :returns: dictionary containing major, minor, micro versions as integers parse the version and update the major, minor and micro version specified by field Return the updated version string given cirrus config, build the expected artifact name given cirrus config, build the expected artifact name given cirrus config, build the expected artifact name build expected release branch name from current config helper to make sure bools are bools _get_plugin_ Get the deploy plugin requested from the factory _release_config_ Extract and validate the release config parameters from the cirrus config for the package # validate argument types # require context string # require context string if release_config['github_develop_context_string'] is None: # require context string _build_parser_ Set up command line parser for the release command # borrow --micro/minor/major options from "new" command. # need to be on the latest develop # make sure repo is clean # # compute next version # # skip any existing unmerged branches # update cirrus conf # update __version__ or equivalent # update files changed _new_release_ - Create a new release branch in the local repo - Edit the conf to bump the version - Edit the history file with release notes # need to be on the latest develop # skip any existing unmerged branches # release branch # make sure the branch doesnt already exist on remote # make sure repo is clean # create release branch # update cirrus conf # update release notes file # update __version__ or equivalent # update files changed _trigger_release_ Alias for "git cirrus release new --micro/minor/major. - Run the "release new" command - Capture the new version string - Pass new version number to external build server Requires the following sections and values in cirrus.conf: [build-server] name = jenkins [jenkins] url = http://localhost:8080 job = default _trigger_jenkins_release_ Performs jenkins specific steps for launching a build job _upload_release_ # merge in release branches and tag, push to remote _cleanup_release_ Remove local and remote release branches if they exist _merge_release_ Merge a release branch git flow style into master and develop branches (or those configured for this package) and tag master. # merge release branch into master # # wait on release branch CI success # # # wait on release branch CI success # # # wait on release branch CI success # check release status # unmerged/tagged release => exit as error status _build_release_ run python setup.py sdist to create the release artifact | 2.277344 | 2 |
clean_aws/datasets/scalars.py | SamuelDiai/Dash-Website | 0 | 6630101 | <gh_stars>0
from re import S, sub
from socket import CAN_RAW_RECV_OWN_MSGS
import pandas as pd
from tqdm import tqdm
from dash_website.utils.aws_loader import load_csv
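# Maps (dimension, subdimension, sub_subdimension) keys to the file-name stem of
# the corresponding biomarker dataset loaded below via load_csv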
DIMENSION_TO_NAME = {
("Brain", "MRI", "GreyMatterVolumes"): "BrainGreyMatterVolumes",
("Brain", "MRI", "SubcorticalVolumes"): "BrainSubcorticalVolumes",
("Brain", "MRI", "dMRIWeightedMeans"): "BraindMRIWeightedMeans",
("Brain", "MRI", "AllScalars"): "BrainMRIAllBiomarkers",
("Brain", "Cognitive", "ReactionTime"): "CognitiveReactionTime",
("Brain", "Cognitive", "MatrixPatternCompletion"): "CognitiveMatrixPatternCompletion",
("Brain", "Cognitive", "TowerRearranging"): "CognitiveTowerRearranging",
("Brain", "Cognitive", "SymbolDigitSubstitution"): "CognitiveSymbolDigitSubstitution",
("Brain", "Cognitive", "PairedAssociativeLearning"): "CognitivePairedAssociativeLearning",
("Brain", "Cognitive", "ProspectiveMemory"): "CognitiveProspectiveMemory",
("Brain", "Cognitive", "NumericMemory"): "CognitiveNumericMemory",
("Brain", "Cognitive", "FluidIntelligence"): "CognitiveFluidIntelligence",
("Brain", "Cognitive", "TrailMaking"): "CognitiveTrailMaking",
("Brain", "Cognitive", "PairsMatching"): "CognitivePairsMatching",
("Brain", "Cognitive", "AllScalars"): "CognitiveAllBiomarkers",
("Brain", "All", "Scalars"): "BrainAndCognitive",
("Eyes", "Autorefraction", "Scalars"): "EyeAutorefraction",
("Eyes", "Acuity", "Scalars"): "EyeAcuity",
("Eyes", "IntraocularPressure", "Scalars"): "EyeIntraocularPressure",
("Eyes", "All", "Scalars"): "EyesAllBiomarkers",
("Hearing", "HearingTest", "Scalars"): "HearingTest",
("Lungs", "Spirometry", "Scalars"): "Spirometry",
("Arterial", "BloodPressure", "Scalars"): "BloodPressure",
("Arterial", "Carotids", "Scalars"): "CarotidUltrasound",
("Arterial", "PWA", "Scalars"): "ArterialStiffness",
("Arterial", "All", "Scalars"): "VascularAllBiomarkers",
("Heart", "All", "Scalars"): "HeartAllBiomarkers",
("Heart", "MRI", "Size"): "HeartSize",
("Heart", "MRI", "PWA"): "HeartPWA",
("Heart", "MRI", "AllScalars"): "HeartMRIAll",
("Heart", "ECG", "Scalars"): "ECGAtRest",
("Musculoskeletal", "Scalars", "Impedance"): "AnthropometryImpedance",
("Musculoskeletal", "Scalars", "Anthropometry"): "AnthropometryBodySize",
("Musculoskeletal", "Scalars", "HeelBoneDensitometry"): "BoneDensitometryOfHeel",
("Musculoskeletal", "Scalars", "HandGripStrength"): "HandGripStrength",
("Musculoskeletal", "Scalars", "AllScalars"): "MusculoskeletalAllBiomarkers",
("Biochemistry", "Blood", "Scalars"): "BloodBiochemistry",
("Biochemistry", "Urine", "Scalars"): "UrineBiochemistry",
("Biochemistry", "All", "Scalars"): "Biochemistry",
("ImmuneSystem", "BloodCount", "Scalars"): "BloodCount",
("PhysicalActivity", "FullWeek", "Scalars"): "PhysicalActivity",
("Demographics", "All", "Scalars"): "Demographics",
}
if __name__ == "__main__":
for dimension, subdimension, sub_subdimension in tqdm(DIMENSION_TO_NAME.keys()):
print(dimension, subdimension, sub_subdimension)
name = DIMENSION_TO_NAME[(dimension, subdimension, sub_subdimension)]
if dimension == "ImmuneSystem":
new_dimension = "BloodCells"
else:
new_dimension = dimension
if dimension != "PhysicalActivity":
raw_scalars = load_csv(f"page1_biomarkers/BiomarkerDatasets/{name}_ethnicity.csv").set_index("id")
else:
raw_scalars = load_csv(f"page1_biomarkers/BiomarkerDatasets/{name}_short.csv").set_index("id")
rename_columns = {"Sex": "sex", "Age when attended assessment centre": "chronological_age"}
        for feature in raw_scalars.columns[raw_scalars.columns.str.contains(".0", regex=False)]:
rename_columns[feature] = feature.replace(".0", "")
scalars = pd.DataFrame(None, index=raw_scalars.index, columns=list(rename_columns.keys()))
scalars[list(rename_columns.keys())] = raw_scalars[list(rename_columns.keys())]
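        # PhysicalActivity stores ethnicity one-hot columns as "Ethnicity.<label>";
        # keep only the label part and copy the remaining feature columns unchanged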
if (dimension, subdimension, sub_subdimension) == ("PhysicalActivity", "FullWeek", "Scalars"):
columns_ethinicities = (
raw_scalars.columns[raw_scalars.columns.str.startswith("Ethnicity")]
.to_series()
.apply(lambda column: column.split(".")[1])
.values
)
scalars[columns_ethinicities] = raw_scalars[
raw_scalars.columns[raw_scalars.columns.str.startswith("Ethnicity")]
]
columns_others = raw_scalars.columns[
~(
raw_scalars.columns.isin(list(rename_columns.keys()) + ["eid"])
| raw_scalars.columns.str.contains("_r")
| raw_scalars.columns.str.startswith("Ethnicity")
)
]
scalars[columns_others] = raw_scalars[columns_others]
else:
columns_ethinicities_and_others = raw_scalars.columns[
~(
raw_scalars.columns.isin(list(rename_columns.keys()) + ["eid"])
| raw_scalars.columns.str.contains("_r")
)
]
scalars[columns_ethinicities_and_others] = raw_scalars[columns_ethinicities_and_others]
if (dimension, subdimension, sub_subdimension) == ("Arterial", "All", "Scalars"):
change_second_pulse_rate = scalars.rename(columns=rename_columns).columns.tolist()
change_second_pulse_rate[
scalars.rename(columns=rename_columns).columns.duplicated().argmax()
] = "Pulse rate.0"
scalars.columns = change_second_pulse_rate
scalars.reset_index().to_feather(
f"all_data/datasets/scalars/{new_dimension}_{subdimension}_{sub_subdimension}.feather"
)
else:
scalars.rename(columns=rename_columns).reset_index().to_feather(
f"all_data/datasets/scalars/{new_dimension}_{subdimension}_{sub_subdimension}.feather"
)
| from re import S, sub
from socket import CAN_RAW_RECV_OWN_MSGS
import pandas as pd
from tqdm import tqdm
from dash_website.utils.aws_loader import load_csv
DIMENSION_TO_NAME = {
("Brain", "MRI", "GreyMatterVolumes"): "BrainGreyMatterVolumes",
("Brain", "MRI", "SubcorticalVolumes"): "BrainSubcorticalVolumes",
("Brain", "MRI", "dMRIWeightedMeans"): "BraindMRIWeightedMeans",
("Brain", "MRI", "AllScalars"): "BrainMRIAllBiomarkers",
("Brain", "Cognitive", "ReactionTime"): "CognitiveReactionTime",
("Brain", "Cognitive", "MatrixPatternCompletion"): "CognitiveMatrixPatternCompletion",
("Brain", "Cognitive", "TowerRearranging"): "CognitiveTowerRearranging",
("Brain", "Cognitive", "SymbolDigitSubstitution"): "CognitiveSymbolDigitSubstitution",
("Brain", "Cognitive", "PairedAssociativeLearning"): "CognitivePairedAssociativeLearning",
("Brain", "Cognitive", "ProspectiveMemory"): "CognitiveProspectiveMemory",
("Brain", "Cognitive", "NumericMemory"): "CognitiveNumericMemory",
("Brain", "Cognitive", "FluidIntelligence"): "CognitiveFluidIntelligence",
("Brain", "Cognitive", "TrailMaking"): "CognitiveTrailMaking",
("Brain", "Cognitive", "PairsMatching"): "CognitivePairsMatching",
("Brain", "Cognitive", "AllScalars"): "CognitiveAllBiomarkers",
("Brain", "All", "Scalars"): "BrainAndCognitive",
("Eyes", "Autorefraction", "Scalars"): "EyeAutorefraction",
("Eyes", "Acuity", "Scalars"): "EyeAcuity",
("Eyes", "IntraocularPressure", "Scalars"): "EyeIntraocularPressure",
("Eyes", "All", "Scalars"): "EyesAllBiomarkers",
("Hearing", "HearingTest", "Scalars"): "HearingTest",
("Lungs", "Spirometry", "Scalars"): "Spirometry",
("Arterial", "BloodPressure", "Scalars"): "BloodPressure",
("Arterial", "Carotids", "Scalars"): "CarotidUltrasound",
("Arterial", "PWA", "Scalars"): "ArterialStiffness",
("Arterial", "All", "Scalars"): "VascularAllBiomarkers",
("Heart", "All", "Scalars"): "HeartAllBiomarkers",
("Heart", "MRI", "Size"): "HeartSize",
("Heart", "MRI", "PWA"): "HeartPWA",
("Heart", "MRI", "AllScalars"): "HeartMRIAll",
("Heart", "ECG", "Scalars"): "ECGAtRest",
("Musculoskeletal", "Scalars", "Impedance"): "AnthropometryImpedance",
("Musculoskeletal", "Scalars", "Anthropometry"): "AnthropometryBodySize",
("Musculoskeletal", "Scalars", "HeelBoneDensitometry"): "BoneDensitometryOfHeel",
("Musculoskeletal", "Scalars", "HandGripStrength"): "HandGripStrength",
("Musculoskeletal", "Scalars", "AllScalars"): "MusculoskeletalAllBiomarkers",
("Biochemistry", "Blood", "Scalars"): "BloodBiochemistry",
("Biochemistry", "Urine", "Scalars"): "UrineBiochemistry",
("Biochemistry", "All", "Scalars"): "Biochemistry",
("ImmuneSystem", "BloodCount", "Scalars"): "BloodCount",
("PhysicalActivity", "FullWeek", "Scalars"): "PhysicalActivity",
("Demographics", "All", "Scalars"): "Demographics",
}
if __name__ == "__main__":
for dimension, subdimension, sub_subdimension in tqdm(DIMENSION_TO_NAME.keys()):
print(dimension, subdimension, sub_subdimension)
name = DIMENSION_TO_NAME[(dimension, subdimension, sub_subdimension)]
if dimension == "ImmuneSystem":
new_dimension = "BloodCells"
else:
new_dimension = dimension
if dimension != "PhysicalActivity":
raw_scalars = load_csv(f"page1_biomarkers/BiomarkerDatasets/{name}_ethnicity.csv").set_index("id")
else:
raw_scalars = load_csv(f"page1_biomarkers/BiomarkerDatasets/{name}_short.csv").set_index("id")
rename_columns = {"Sex": "sex", "Age when attended assessment centre": "chronological_age"}
        for feature in raw_scalars.columns[raw_scalars.columns.str.contains(".0", regex=False)]:
rename_columns[feature] = feature.replace(".0", "")
scalars = pd.DataFrame(None, index=raw_scalars.index, columns=list(rename_columns.keys()))
scalars[list(rename_columns.keys())] = raw_scalars[list(rename_columns.keys())]
if (dimension, subdimension, sub_subdimension) == ("PhysicalActivity", "FullWeek", "Scalars"):
columns_ethinicities = (
raw_scalars.columns[raw_scalars.columns.str.startswith("Ethnicity")]
.to_series()
.apply(lambda column: column.split(".")[1])
.values
)
scalars[columns_ethinicities] = raw_scalars[
raw_scalars.columns[raw_scalars.columns.str.startswith("Ethnicity")]
]
columns_others = raw_scalars.columns[
~(
raw_scalars.columns.isin(list(rename_columns.keys()) + ["eid"])
| raw_scalars.columns.str.contains("_r")
| raw_scalars.columns.str.startswith("Ethnicity")
)
]
scalars[columns_others] = raw_scalars[columns_others]
else:
columns_ethinicities_and_others = raw_scalars.columns[
~(
raw_scalars.columns.isin(list(rename_columns.keys()) + ["eid"])
| raw_scalars.columns.str.contains("_r")
)
]
scalars[columns_ethinicities_and_others] = raw_scalars[columns_ethinicities_and_others]
if (dimension, subdimension, sub_subdimension) == ("Arterial", "All", "Scalars"):
change_second_pulse_rate = scalars.rename(columns=rename_columns).columns.tolist()
change_second_pulse_rate[
scalars.rename(columns=rename_columns).columns.duplicated().argmax()
] = "Pulse rate.0"
scalars.columns = change_second_pulse_rate
scalars.reset_index().to_feather(
f"all_data/datasets/scalars/{new_dimension}_{subdimension}_{sub_subdimension}.feather"
)
else:
scalars.rename(columns=rename_columns).reset_index().to_feather(
f"all_data/datasets/scalars/{new_dimension}_{subdimension}_{sub_subdimension}.feather"
) | none | 1 | 1.863037 | 2 |
|
cyp/data/exporting.py | DaikiOnodera/pycrop-yield-prediction | 93 | 6630102 | <filename>cyp/data/exporting.py<gh_stars>10-100
import ee
import ssl
import time
from pathlib import Path
import numpy as np
from .utils import load_clean_yield_data as load
from .utils import get_tif_files
from .. import MAJOR_STATES
class MODISExporter:
""" A class to export MODIS data from
the Google Earth Engine to Google Drive
Parameters
----------
locations_filepath: pathlib Path, default=Path('data/yield_data.csv')
A path to the yield data
collection_id: str, default='MODIS/051/MCD12Q1'
The ID Earth Engine Image Collection being exported
"""
def __init__(self, locations_filepath=Path('data/yield_data.csv'),
collection_id='MODIS/051/MCD12Q1'):
self.locations = load(locations_filepath)
self.collection_id = collection_id
try:
ee.Initialize()
print('The Earth Engine package initialized successfully!')
except ee.EEException:
print('The Earth Engine package failed to initialize! '
'Have you authenticated the earth engine?')
def update_parameters(self, locations_filepath=None, collection_id=None):
"""
Update the locations file or the collection id
"""
if locations_filepath is not None:
self.locations = load(locations_filepath)
if collection_id is not None:
self.collection_id = collection_id
@staticmethod
def _export_one_image(img, folder, name, region, scale, crs):
# export one image from Earth Engine to Google Drive
# Author: <NAME>, https://github.com/JiaxuanYou
print(f'Exporting to {folder}/{name}')
task_dict = {
'driveFolder': folder,
'driveFileNamePrefix': name,
'scale': scale,
'crs': crs
}
if region is not None:
task_dict.update({
'region': region
})
task = ee.batch.Export.image(img, name, task_dict)
task.start()
while task.status()['state'] == 'RUNNING':
print('Running...')
# Perhaps task.cancel() at some point.
time.sleep(10)
print(f'Done: {task.status()}')
def export(self, folder_name, data_type, coordinate_system='EPSG:4326', scale=500,
export_limit=None, min_img_val=None, max_img_val=None, major_states_only=True,
check_if_done=False, download_folder=None):
"""Export an Image Collection from Earth Engine to Google Drive
Parameters
----------
folder_name: str
The name of the folder to export the images to in
Google Drive. If the folder is not there, this process
creates it
data_type: str {'image', 'mask', 'temperature'}
The type of data we are collecting. This tells us which bands to collect.
coordinate_system: str, default='EPSG:4326'
The coordinate system in which to export the data
scale: int, default=500
The pixel resolution, as determined by the output.
https://developers.google.com/earth-engine/scale
export_limit: int or None, default=None
If not none, limits the number of files exported to the value
passed.
        min_img_val: int or None
A minimum value to clip the band values to
max_img_val: int or None
A maximum value to clip the band values to
major_states_only: boolean, default=True
Whether to only use the 11 states responsible for 75 % of national soybean
production, as is done in the paper
check_if_done: boolean, default=False
If true, will check download_folder for any .tif files which have already been
downloaded, and won't export them again. This effectively allows for
checkpointing, and prevents all files from having to be downloaded at once.
download_folder: None or pathlib Path, default=None
Which folder to check for downloaded files, if check_if_done=True. If None, looks
in data/folder_name
"""
if check_if_done:
if download_folder is None:
download_folder = Path('data') / folder_name
already_downloaded = get_tif_files(download_folder)
imgcoll = ee.ImageCollection(self.collection_id) \
.filterBounds(ee.Geometry.Rectangle(-106.5, 50, -64, 23)) \
.filterDate('2002-12-31', '2016-8-4')
datatype_to_func = {
'image': _append_im_band,
'mask': _append_mask_band,
'temperature': _append_temp_band,
}
img = imgcoll.iterate(datatype_to_func[data_type])
img = ee.Image(img)
# "clip" the values of the bands
if min_img_val is not None:
# passing en ee.Number creates a constant image
img_min = ee.Image(ee.Number(min_img_val))
img = img.min(img_min)
if max_img_val is not None:
img_max = ee.Image(ee.Number(max_img_val))
img = img.max(img_max)
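        # NOTE: ee.Image.min()/max() are pointwise, so min_img_val acts as an
        # upper bound and max_img_val as a lower bound on the band values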
# note that the county regions are pulled from Google's Fusion tables. This calls a merge
# of county geometry and census data:
# https://fusiontables.google.com/data?docid=1S4EB6319wWW2sWQDPhDvmSBIVrD3iEmCLYB7nMM#rows:id=1
region = ee.FeatureCollection('TIGER/2018/Counties')
# turn the strings into numbers, see
# https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_Counties
def state_to_int(feature):
return feature.set('COUNTYFP', ee.Number.parse(feature.get('COUNTYFP')))
region = region.map(state_to_int)
count = 0
for state_id, county_id in np.unique(self.locations[['State ANSI', 'County ANSI']].values, axis=0):
if major_states_only:
if int(state_id) not in MAJOR_STATES:
print(f'Skipping state id {int(state_id)}')
continue
fname = '{}_{}'.format(int(state_id), int(county_id))
if check_if_done:
if f'{fname}.tif' in already_downloaded:
print(f'{fname}.tif already downloaded! Skipping')
continue
file_region = region.filterMetadata('COUNTYFP', 'equals', int(county_id))
file_region = ee.Feature(file_region.first())
processed_img = img.clip(file_region)
file_region = None
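            # the image is already clipped to the county geometry, so no explicit
            # export region is passed to the export task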
while True:
try:
self._export_one_image(processed_img, folder_name, fname, file_region, scale, coordinate_system)
except (ee.ee_exception.EEException, ssl.SSLEOFError):
print(f'Retrying State {int(state_id)}, County {int(county_id)}')
time.sleep(10)
continue
break
count += 1
if export_limit:
if count >= export_limit:
print('Reached export limit! Stopping')
break
print(f'Finished Exporting {count} files!')
def export_all(self, export_limit=None, major_states_only=True, check_if_done=True,
download_folder=None):
"""
Export all the data.
download_folder = list of 3 pathlib Paths, for each of the 3 downloads
"""
if download_folder is None:
download_folder = [None] * 3
assert len(download_folder) == 3, "Must have 3 download folders for the 3 exports!"
# first, make sure the class was initialized correctly
self.update_parameters(locations_filepath=Path('data/yield_data.csv'),
collection_id='MODIS/MOD09A1')
# # pull_MODIS_entire_county_clip.py
self.export(folder_name='crop_yield-data_image', data_type='image',
min_img_val=16000, max_img_val=100,
export_limit=export_limit, major_states_only=major_states_only,
check_if_done=check_if_done, download_folder=download_folder[0])
# pull_MODIS_landcover_entire_county_clip.py
self.update_parameters(collection_id='MODIS/051/MCD12Q1')
self.export(folder_name='crop_yield-data_mask', data_type='mask',
export_limit=export_limit, major_states_only=major_states_only,
check_if_done=check_if_done, download_folder=download_folder[1])
# pull_MODIS_temperature_entire_county_clip.py
self.update_parameters(collection_id='MODIS/MYD11A2')
self.export(folder_name='crop_yield-data_temperature', data_type='temperature',
export_limit=export_limit, major_states_only=major_states_only,
check_if_done=check_if_done, download_folder=download_folder[2])
print('Done exporting! Download the folders from your Google Drive')
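# Illustrative usage (assumes Earth Engine credentials are already configured):
#
#   exporter = MODISExporter(locations_filepath=Path('data/yield_data.csv'))
#   exporter.export_all(export_limit=None, check_if_done=True)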
def _append_mask_band(current, previous):
# Transforms an Image Collection with 1 band per Image into a single Image with items as bands
# Author: <NAME>
# Rename the band
previous = ee.Image(previous)
current = current.select([0])
# Append it to the result (Note: only return current item on first element/iteration)
return ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, previous.addBands(ee.Image(current)))
def _append_temp_band(current, previous):
# Transforms an Image Collection with 1 band per Image into a single Image with items as bands
# Author: <NAME>
# Rename the band
previous = ee.Image(previous)
current = current.select([0, 4])
# Append it to the result (Note: only return current item on first element/iteration)
return ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, previous.addBands(ee.Image(current)))
def _append_im_band(current, previous):
# Transforms an Image Collection with 1 band per Image into a single Image with items as bands
# Author: <NAME>
# Rename the band
previous = ee.Image(previous)
current = current.select([0, 1, 2, 3, 4, 5, 6])
# Append it to the result (Note: only return current item on first element/iteration)
return ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, previous.addBands(ee.Image(current)))
| <filename>cyp/data/exporting.py<gh_stars>10-100
import ee
import ssl
import time
from pathlib import Path
import numpy as np
from .utils import load_clean_yield_data as load
from .utils import get_tif_files
from .. import MAJOR_STATES
class MODISExporter:
""" A class to export MODIS data from
the Google Earth Engine to Google Drive
Parameters
----------
locations_filepath: pathlib Path, default=Path('data/yield_data.csv')
A path to the yield data
collection_id: str, default='MODIS/051/MCD12Q1'
The ID Earth Engine Image Collection being exported
"""
def __init__(self, locations_filepath=Path('data/yield_data.csv'),
collection_id='MODIS/051/MCD12Q1'):
self.locations = load(locations_filepath)
self.collection_id = collection_id
try:
ee.Initialize()
print('The Earth Engine package initialized successfully!')
except ee.EEException:
print('The Earth Engine package failed to initialize! '
'Have you authenticated the earth engine?')
def update_parameters(self, locations_filepath=None, collection_id=None):
"""
Update the locations file or the collection id
"""
if locations_filepath is not None:
self.locations = load(locations_filepath)
if collection_id is not None:
self.collection_id = collection_id
@staticmethod
def _export_one_image(img, folder, name, region, scale, crs):
# export one image from Earth Engine to Google Drive
# Author: <NAME>, https://github.com/JiaxuanYou
print(f'Exporting to {folder}/{name}')
task_dict = {
'driveFolder': folder,
'driveFileNamePrefix': name,
'scale': scale,
'crs': crs
}
if region is not None:
task_dict.update({
'region': region
})
task = ee.batch.Export.image(img, name, task_dict)
task.start()
while task.status()['state'] == 'RUNNING':
print('Running...')
# Perhaps task.cancel() at some point.
time.sleep(10)
print(f'Done: {task.status()}')
def export(self, folder_name, data_type, coordinate_system='EPSG:4326', scale=500,
export_limit=None, min_img_val=None, max_img_val=None, major_states_only=True,
check_if_done=False, download_folder=None):
"""Export an Image Collection from Earth Engine to Google Drive
Parameters
----------
folder_name: str
The name of the folder to export the images to in
Google Drive. If the folder is not there, this process
creates it
data_type: str {'image', 'mask', 'temperature'}
The type of data we are collecting. This tells us which bands to collect.
coordinate_system: str, default='EPSG:4326'
The coordinate system in which to export the data
scale: int, default=500
The pixel resolution, as determined by the output.
https://developers.google.com/earth-engine/scale
export_limit: int or None, default=None
If not none, limits the number of files exported to the value
passed.
        min_img_val: int or None
A minimum value to clip the band values to
max_img_val: int or None
A maximum value to clip the band values to
major_states_only: boolean, default=True
Whether to only use the 11 states responsible for 75 % of national soybean
production, as is done in the paper
check_if_done: boolean, default=False
If true, will check download_folder for any .tif files which have already been
downloaded, and won't export them again. This effectively allows for
checkpointing, and prevents all files from having to be downloaded at once.
download_folder: None or pathlib Path, default=None
Which folder to check for downloaded files, if check_if_done=True. If None, looks
in data/folder_name
"""
if check_if_done:
if download_folder is None:
download_folder = Path('data') / folder_name
already_downloaded = get_tif_files(download_folder)
imgcoll = ee.ImageCollection(self.collection_id) \
.filterBounds(ee.Geometry.Rectangle(-106.5, 50, -64, 23)) \
.filterDate('2002-12-31', '2016-8-4')
datatype_to_func = {
'image': _append_im_band,
'mask': _append_mask_band,
'temperature': _append_temp_band,
}
img = imgcoll.iterate(datatype_to_func[data_type])
img = ee.Image(img)
# "clip" the values of the bands
if min_img_val is not None:
# passing en ee.Number creates a constant image
img_min = ee.Image(ee.Number(min_img_val))
img = img.min(img_min)
if max_img_val is not None:
img_max = ee.Image(ee.Number(max_img_val))
img = img.max(img_max)
# note that the county regions are pulled from Google's Fusion tables. This calls a merge
# of county geometry and census data:
# https://fusiontables.google.com/data?docid=1S4EB6319wWW2sWQDPhDvmSBIVrD3iEmCLYB7nMM#rows:id=1
region = ee.FeatureCollection('TIGER/2018/Counties')
# turn the strings into numbers, see
# https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_Counties
def state_to_int(feature):
return feature.set('COUNTYFP', ee.Number.parse(feature.get('COUNTYFP')))
region = region.map(state_to_int)
count = 0
for state_id, county_id in np.unique(self.locations[['State ANSI', 'County ANSI']].values, axis=0):
if major_states_only:
if int(state_id) not in MAJOR_STATES:
print(f'Skipping state id {int(state_id)}')
continue
fname = '{}_{}'.format(int(state_id), int(county_id))
if check_if_done:
if f'{fname}.tif' in already_downloaded:
print(f'{fname}.tif already downloaded! Skipping')
continue
file_region = region.filterMetadata('COUNTYFP', 'equals', int(county_id))
file_region = ee.Feature(file_region.first())
processed_img = img.clip(file_region)
file_region = None
while True:
try:
self._export_one_image(processed_img, folder_name, fname, file_region, scale, coordinate_system)
except (ee.ee_exception.EEException, ssl.SSLEOFError):
print(f'Retrying State {int(state_id)}, County {int(county_id)}')
time.sleep(10)
continue
break
count += 1
if export_limit:
if count >= export_limit:
print('Reached export limit! Stopping')
break
print(f'Finished Exporting {count} files!')
def export_all(self, export_limit=None, major_states_only=True, check_if_done=True,
download_folder=None):
"""
Export all the data.
download_folder = list of 3 pathlib Paths, for each of the 3 downloads
"""
if download_folder is None:
download_folder = [None] * 3
assert len(download_folder) == 3, "Must have 3 download folders for the 3 exports!"
# first, make sure the class was initialized correctly
self.update_parameters(locations_filepath=Path('data/yield_data.csv'),
collection_id='MODIS/MOD09A1')
# # pull_MODIS_entire_county_clip.py
self.export(folder_name='crop_yield-data_image', data_type='image',
min_img_val=16000, max_img_val=100,
export_limit=export_limit, major_states_only=major_states_only,
check_if_done=check_if_done, download_folder=download_folder[0])
# pull_MODIS_landcover_entire_county_clip.py
self.update_parameters(collection_id='MODIS/051/MCD12Q1')
self.export(folder_name='crop_yield-data_mask', data_type='mask',
export_limit=export_limit, major_states_only=major_states_only,
check_if_done=check_if_done, download_folder=download_folder[1])
# pull_MODIS_temperature_entire_county_clip.py
self.update_parameters(collection_id='MODIS/MYD11A2')
self.export(folder_name='crop_yield-data_temperature', data_type='temperature',
export_limit=export_limit, major_states_only=major_states_only,
check_if_done=check_if_done, download_folder=download_folder[2])
print('Done exporting! Download the folders from your Google Drive')
def _append_mask_band(current, previous):
# Transforms an Image Collection with 1 band per Image into a single Image with items as bands
# Author: <NAME>
# Rename the band
previous = ee.Image(previous)
current = current.select([0])
# Append it to the result (Note: only return current item on first element/iteration)
return ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, previous.addBands(ee.Image(current)))
def _append_temp_band(current, previous):
# Transforms an Image Collection with 1 band per Image into a single Image with items as bands
# Author: <NAME>
# Rename the band
previous = ee.Image(previous)
current = current.select([0, 4])
# Append it to the result (Note: only return current item on first element/iteration)
return ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, previous.addBands(ee.Image(current)))
def _append_im_band(current, previous):
# Transforms an Image Collection with 1 band per Image into a single Image with items as bands
# Author: <NAME>
# Rename the band
previous = ee.Image(previous)
current = current.select([0, 1, 2, 3, 4, 5, 6])
# Append it to the result (Note: only return current item on first element/iteration)
return ee.Algorithms.If(ee.Algorithms.IsEqual(previous, None), current, previous.addBands(ee.Image(current)))
| en | 0.796131 | A class to export MODIS data from the Google Earth Engine to Google Drive Parameters ---------- locations_filepath: pathlib Path, default=Path('data/yield_data.csv') A path to the yield data collection_id: str, default='MODIS/051/MCD12Q1' The ID Earth Engine Image Collection being exported Update the locations file or the collection id # export one image from Earth Engine to Google Drive # Author: <NAME>, https://github.com/JiaxuanYou # Perhaps task.cancel() at some point. Export an Image Collection from Earth Engine to Google Drive Parameters ---------- folder_name: str The name of the folder to export the images to in Google Drive. If the folder is not there, this process creates it data_type: str {'image', 'mask', 'temperature'} The type of data we are collecting. This tells us which bands to collect. coordinate_system: str, default='EPSG:4326' The coordinate system in which to export the data scale: int, default=500 The pixel resolution, as determined by the output. https://developers.google.com/earth-engine/scale export_limit: int or None, default=None If not none, limits the number of files exported to the value passed. min_img_val = int or None: A minimum value to clip the band values to max_img_val: int or None A maximum value to clip the band values to major_states_only: boolean, default=True Whether to only use the 11 states responsible for 75 % of national soybean production, as is done in the paper check_if_done: boolean, default=False If true, will check download_folder for any .tif files which have already been downloaded, and won't export them again. This effectively allows for checkpointing, and prevents all files from having to be downloaded at once. download_folder: None or pathlib Path, default=None Which folder to check for downloaded files, if check_if_done=True. If None, looks in data/folder_name # "clip" the values of the bands # passing en ee.Number creates a constant image # note that the county regions are pulled from Google's Fusion tables. This calls a merge # of county geometry and census data: # https://fusiontables.google.com/data?docid=1S4EB6319wWW2sWQDPhDvmSBIVrD3iEmCLYB7nMM#rows:id=1 # turn the strings into numbers, see # https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_Counties Export all the data. download_folder = list of 3 pathlib Paths, for each of the 3 downloads # first, make sure the class was initialized correctly # # pull_MODIS_entire_county_clip.py # pull_MODIS_landcover_entire_county_clip.py # pull_MODIS_temperature_entire_county_clip.py # Transforms an Image Collection with 1 band per Image into a single Image with items as bands # Author: <NAME> # Rename the band # Append it to the result (Note: only return current item on first element/iteration) # Transforms an Image Collection with 1 band per Image into a single Image with items as bands # Author: <NAME> # Rename the band # Append it to the result (Note: only return current item on first element/iteration) # Transforms an Image Collection with 1 band per Image into a single Image with items as bands # Author: <NAME> # Rename the band # Append it to the result (Note: only return current item on first element/iteration) | 2.657733 | 3 |
host/greatfet/util/console.py | grvvy/greatfet | 328 | 6630103 | #!/usr/bin/env python3
#
# This file is part of GreatFET.
#
# Console I/O handling; lifted from pyserial/miniterm.py, which uses the same BSD license.
# (C)2002-2017 <NAME> <<EMAIL>>
#
import os
import sys
import codecs
# Python 2/3 compatible method for using raw_input, and unicode-aware chr.
try:
raw_input
except NameError:
raw_input = input
unichr = chr
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
if isinstance(byte_string, tuple):
byte_string = byte_string[0]
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
class NTConsole(ConsoleBase):
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
def __init__(self):
import ctypes
super(NTConsole, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(self.Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(self.Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
import ctypes
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
import msvcrt
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
            elif z in (unichr(0), unichr(0x0e)):  # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
import ctypes
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
class POSIXConsole(ConsoleBase):
def __init__(self):
import atexit
import termios
super(POSIXConsole, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
def setup(self):
import termios
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
try:
c = sys.stdin.buffer.read(1)
except AttributeError:
c = sys.stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
import termios
import fcntl
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
import termios
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
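# Illustrative usage of the factory below:
#   console = Console()
#   console.setup()      # switch the terminal to raw, single-key mode
#   key = console.getkey()
#   console.cleanup()    # restore the original terminal settings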
def Console():
""" Factory method that returns the Console object most appropriate for the current OS envrionment. """
if os.name == "posix":
return POSIXConsole()
elif os.name == "nt":
return NTConsole()
else:
raise NotImplementedError("Console support not implemented for OS '{}'.".format(os.name))
| #!/usr/bin/env python3
#
# This file is part of GreatFET.
#
# Console I/O handling; lifted from pyserial/miniterm.py, which uses the same BSD license.
# (C)2002-2017 <NAME> <<EMAIL>>
#
import os
import sys
import codecs
# Python 2/3 compatible method for using raw_input, and unicode-aware chr.
try:
raw_input
except NameError:
raw_input = input
unichr = chr
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
if isinstance(byte_string, tuple):
byte_string = byte_string[0]
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
class NTConsole(ConsoleBase):
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
def __init__(self):
import ctypes
super(NTConsole, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(self.Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(self.Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
import ctypes
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
import msvcrt
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
import ctypes
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
class POSIXConsole(ConsoleBase):
def __init__(self):
import atexit
import termios
super(POSIXConsole, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
def setup(self):
import termios
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
try:
c = sys.stdin.buffer.read(1)
except AttributeError:
c = sys.stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
import termios
import fcntl
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
import termios
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
def Console():
""" Factory method that returns the Console object most appropriate for the current OS envrionment. """
if os.name == "posix":
return POSIXConsole()
elif os.name == "nt":
return NTConsole()
else:
raise NotImplementedError("Console support not implemented for OS '{}'.".format(os.name))
| en | 0.69245 | #!/usr/bin/env python3 # # This file is part of GreatFET. # # Console I/O handling; lifted from pyserial/miniterm.py, which uses the same BSD license. # (C)2002-2017 <NAME> <<EMAIL>> # # Python 2/3 compatible method for using raw_input, and unicode-aware chr. OS abstraction for console (input/output codec, no echo) Set console to read single characters, no echo Restore default console settings Read a single key from the console Write bytes (already encoded) Write string Cancel getkey operation # - - - - - - - - - - - - - - - - - - - - - - - - # context manager: # switch terminal temporary to normal mode (e.g. to get user input) file-like wrapper that uses os.write # the change of the code page is not propagated to Python, manually fix it # needed for input # functions keys, ignore # CancelIo, CancelSynchronousIo do not seem to work when using # getwch, so instead, send a key to the window with the console # map the BS key (which yields DEL) to backspace Factory method that returns the Console object most appropriate for the current OS envrionment. | 3.116551 | 3 |
python/pyspark/mllib/classification.py | philipphoffmann/spark | 0 | 6630104 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import numpy
from numpy import array
from pyspark.mllib.common import callMLlibFunc
from pyspark.mllib.linalg import SparseVector, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint, LinearModel, _regression_train_wrapper
__all__ = ['LogisticRegressionModel', 'LogisticRegressionWithSGD', 'SVMModel',
'SVMWithSGD', 'NaiveBayesModel', 'NaiveBayes']
class LogisticRegressionModel(LinearModel):
"""A linear binary classification model derived from logistic regression.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data))
>>> lrm.predict(array([1.0])) > 0
True
>>> lrm.predict(array([0.0])) <= 0
True
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data))
>>> lrm.predict(array([0.0, 1.0])) > 0
True
>>> lrm.predict(array([0.0, 0.0])) <= 0
True
>>> lrm.predict(SparseVector(2, {1: 1.0})) > 0
True
>>> lrm.predict(SparseVector(2, {1: 0.0})) <= 0
True
"""
def predict(self, x):
x = _convert_to_vector(x)
margin = self.weights.dot(x) + self._intercept
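        # The two branches below evaluate the logistic sigmoid in a numerically
        # stable way: exp() is only ever applied to a non-positive argument, so
        # a large |margin| cannot overflow the exponential.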
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
return 1 if prob > 0.5 else 0
class LogisticRegressionWithSGD(object):
@classmethod
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
initialWeights=None, regParam=1.0, regType="none", intercept=False):
"""
Train a logistic regression model on the given data.
:param data: The training data, an RDD of LabeledPoint.
:param iterations: The number of iterations (default: 100).
:param step: The step parameter used in SGD
(default: 1.0).
:param miniBatchFraction: Fraction of data to be used for each SGD
iteration.
:param initialWeights: The initial weights (default: None).
:param regParam: The regularizer parameter (default: 1.0).
:param regType: The type of regularizer used for training
our model.
:Allowed values:
- "l1" for using L1Updater
- "l2" for using SquaredL2Updater
- "none" for no regularizer
(default: "none")
        :param intercept: Boolean parameter which indicates the use
or not of the augmented representation for
training data (i.e. whether bias features
are activated or not).
"""
def train(rdd, i):
return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, iterations, step,
miniBatchFraction, i, regParam, regType, intercept)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
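# Illustrative usage sketch (parameter values here are arbitrary, not defaults
# taken from any particular Spark release):
#
#   lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=200,
#                                         step=0.1, regParam=0.01, regType="l2")
#   lrm.predict(SparseVector(2, {1: 1.0}))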
class SVMModel(LinearModel):
"""A support vector machine.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(data))
>>> svm.predict(array([1.0])) > 0
True
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(sparse_data))
>>> svm.predict(SparseVector(2, {1: 1.0})) > 0
True
>>> svm.predict(SparseVector(2, {0: -1.0})) <= 0
True
"""
def predict(self, x):
x = _convert_to_vector(x)
margin = self.weights.dot(x) + self.intercept
return 1 if margin >= 0 else 0
class SVMWithSGD(object):
@classmethod
def train(cls, data, iterations=100, step=1.0, regParam=1.0,
miniBatchFraction=1.0, initialWeights=None, regType="none", intercept=False):
"""
Train a support vector machine on the given data.
:param data: The training data, an RDD of LabeledPoint.
:param iterations: The number of iterations (default: 100).
:param step: The step parameter used in SGD
(default: 1.0).
:param regParam: The regularizer parameter (default: 1.0).
:param miniBatchFraction: Fraction of data to be used for each SGD
iteration.
:param initialWeights: The initial weights (default: None).
:param regType: The type of regularizer used for training
our model.
:Allowed values:
- "l1" for using L1Updater
- "l2" for using SquaredL2Updater,
- "none" for no regularizer.
(default: "none")
        :param intercept: Boolean parameter which indicates the use
or not of the augmented representation for
training data (i.e. whether bias features
are activated or not).
"""
def train(rdd, i):
return callMLlibFunc("trainSVMModelWithSGD", rdd, iterations, step, regParam,
miniBatchFraction, i, regType, intercept)
return _regression_train_wrapper(train, SVMModel, data, initialWeights)
class NaiveBayesModel(object):
"""
Model for Naive Bayes classifiers.
Contains two parameters:
- pi: vector of logs of class priors (dimension C)
- theta: matrix of logs of class conditional probabilities (CxD)
>>> data = [
... LabeledPoint(0.0, [0.0, 0.0]),
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> model = NaiveBayes.train(sc.parallelize(data))
>>> model.predict(array([0.0, 1.0]))
0.0
>>> model.predict(array([1.0, 0.0]))
1.0
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
... LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
... ]
>>> model = NaiveBayes.train(sc.parallelize(sparse_data))
>>> model.predict(SparseVector(2, {1: 1.0}))
0.0
>>> model.predict(SparseVector(2, {0: 1.0}))
1.0
"""
def __init__(self, labels, pi, theta):
self.labels = labels
self.pi = pi
self.theta = theta
def predict(self, x):
"""Return the most likely class for a data vector x"""
x = _convert_to_vector(x)
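        # pi holds the log class priors and theta the log conditional
        # probabilities, so pi + x.dot(theta.T) is the unnormalized log
        # posterior for each class; argmax selects the most likely label.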
return self.labels[numpy.argmax(self.pi + x.dot(self.theta.transpose()))]
class NaiveBayes(object):
@classmethod
def train(cls, data, lambda_=1.0):
"""
Train a Naive Bayes model given an RDD of (label, features) vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can
handle all kinds of discrete data. For example, by converting
documents into TF-IDF vectors, it can be used for document
classification. By making every vector a 0-1 vector, it can also be
used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
:param data: RDD of LabeledPoint.
        :param lambda_: The smoothing parameter (default: 1.0)
"""
first = data.first()
if not isinstance(first, LabeledPoint):
raise ValueError("`data` should be an RDD of LabeledPoint")
labels, pi, theta = callMLlibFunc("trainNaiveBayes", data, lambda_)
return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
def _test():
import doctest
from pyspark import SparkContext
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
    _test()
| en | 0.542835 | # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A linear binary classification model derived from logistic regression. >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data)) >>> lrm.predict(array([1.0])) > 0 True >>> lrm.predict(array([0.0])) <= 0 True >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data)) >>> lrm.predict(array([0.0, 1.0])) > 0 True >>> lrm.predict(array([0.0, 0.0])) <= 0 True >>> lrm.predict(SparseVector(2, {1: 1.0})) > 0 True >>> lrm.predict(SparseVector(2, {1: 0.0})) <= 0 True Train a logistic regression model on the given data. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations (default: 100). :param step: The step parameter used in SGD (default: 1.0). :param miniBatchFraction: Fraction of data to be used for each SGD iteration. :param initialWeights: The initial weights (default: None). :param regParam: The regularizer parameter (default: 1.0). :param regType: The type of regularizer used for training our model. :Allowed values: - "l1" for using L1Updater - "l2" for using SquaredL2Updater - "none" for no regularizer (default: "none") @param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e. whether bias features are activated or not). A support vector machine. >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> svm = SVMWithSGD.train(sc.parallelize(data)) >>> svm.predict(array([1.0])) > 0 True >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: -1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> svm = SVMWithSGD.train(sc.parallelize(sparse_data)) >>> svm.predict(SparseVector(2, {1: 1.0})) > 0 True >>> svm.predict(SparseVector(2, {0: -1.0})) <= 0 True Train a support vector machine on the given data. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations (default: 100). :param step: The step parameter used in SGD (default: 1.0). :param regParam: The regularizer parameter (default: 1.0). :param miniBatchFraction: Fraction of data to be used for each SGD iteration. :param initialWeights: The initial weights (default: None). 
:param regType: The type of regularizer used for training our model. :Allowed values: - "l1" for using L1Updater - "l2" for using SquaredL2Updater, - "none" for no regularizer. (default: "none") @param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e. whether bias features are activated or not). Model for Naive Bayes classifiers. Contains two parameters: - pi: vector of logs of class priors (dimension C) - theta: matrix of logs of class conditional probabilities (CxD) >>> data = [ ... LabeledPoint(0.0, [0.0, 0.0]), ... LabeledPoint(0.0, [0.0, 1.0]), ... LabeledPoint(1.0, [1.0, 0.0]), ... ] >>> model = NaiveBayes.train(sc.parallelize(data)) >>> model.predict(array([0.0, 1.0])) 0.0 >>> model.predict(array([1.0, 0.0])) 1.0 >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {1: 0.0})), ... LabeledPoint(0.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {0: 1.0})) ... ] >>> model = NaiveBayes.train(sc.parallelize(sparse_data)) >>> model.predict(SparseVector(2, {1: 1.0})) 0.0 >>> model.predict(SparseVector(2, {0: 1.0})) 1.0 Return the most likely class for a data vector x Train a Naive Bayes model given an RDD of (label, features) vectors. This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can handle all kinds of discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a 0-1 vector, it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}). :param data: RDD of LabeledPoint. :param lambda_: The smoothing parameter | 1.920399 | 2 |
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/models/encryption_services.py | pjquirk/azure-sdk-for-python | 1 | 6630105 | <filename>sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/models/encryption_services.py<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EncryptionServices(Model):
"""A list of services that support encryption.
Variables are only populated by the server, and will be ignored when
sending a request.
:param blob: The encryption function of the blob storage service.
:type blob: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
:param file: The encryption function of the file storage service.
:type file: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
:ivar table: The encryption function of the table storage service.
:vartype table: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
:ivar queue: The encryption function of the queue storage service.
:vartype queue: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
"""
_validation = {
'table': {'readonly': True},
'queue': {'readonly': True},
}
_attribute_map = {
'blob': {'key': 'blob', 'type': 'EncryptionService'},
'file': {'key': 'file', 'type': 'EncryptionService'},
'table': {'key': 'table', 'type': 'EncryptionService'},
'queue': {'key': 'queue', 'type': 'EncryptionService'},
}
def __init__(self, **kwargs):
super(EncryptionServices, self).__init__(**kwargs)
self.blob = kwargs.get('blob', None)
self.file = kwargs.get('file', None)
self.table = None
self.queue = None
| <filename>sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/models/encryption_services.py<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EncryptionServices(Model):
"""A list of services that support encryption.
Variables are only populated by the server, and will be ignored when
sending a request.
:param blob: The encryption function of the blob storage service.
:type blob: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
:param file: The encryption function of the file storage service.
:type file: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
:ivar table: The encryption function of the table storage service.
:vartype table: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
:ivar queue: The encryption function of the queue storage service.
:vartype queue: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService
"""
_validation = {
'table': {'readonly': True},
'queue': {'readonly': True},
}
_attribute_map = {
'blob': {'key': 'blob', 'type': 'EncryptionService'},
'file': {'key': 'file', 'type': 'EncryptionService'},
'table': {'key': 'table', 'type': 'EncryptionService'},
'queue': {'key': 'queue', 'type': 'EncryptionService'},
}
def __init__(self, **kwargs):
super(EncryptionServices, self).__init__(**kwargs)
self.blob = kwargs.get('blob', None)
self.file = kwargs.get('file', None)
self.table = None
self.queue = None
| en | 0.596284 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- A list of services that support encryption. Variables are only populated by the server, and will be ignored when sending a request. :param blob: The encryption function of the blob storage service. :type blob: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService :param file: The encryption function of the file storage service. :type file: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService :ivar table: The encryption function of the table storage service. :vartype table: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService :ivar queue: The encryption function of the queue storage service. :vartype queue: ~azure.mgmt.storage.v2018_11_01.models.EncryptionService | 1.895593 | 2 |
nlp_commons/ubound.py | manderous/struct-learning-with-flow | 63 | 6630106 | <gh_stars>10-100
# Copyright (C) 2007-2011 <NAME>
# URL: <http://www.cs.famaf.unc.edu.ar/~francolq/>
# For license information, see LICENSE.txt
#!/usr/bin/python
# Precision and recall calculation for the UBOUND topline
"""
WSJ10
Number of trees: 7422.0
Measures summing over all brackets:
Precision: 78.8
Recall: 100.0
Harmonic mean F1: 88.1
NEGRA10
Number of trees: 7537.0
Measures summing over all brackets:
Precision: 56.4
Recall: 100.0
Harmonic mean F1: 72.1
CAST3LB10
Number of trees: 712.0
Measures summing over all brackets:
Precision: 70.1
Recall: 100.0
Harmonic mean F1: 82.4
"""
from . import model, bracketing
class UBound(model.BracketingModel):
trained = True
tested = True
def __init__(self, treebank):
self.Gold = [bracketing.tree_to_bracketing(t) for t in treebank.trees]
    # FIXME: not properly adapted to use count_fullspan_bracket
def measures(self, i):
g = self.Gold[i]
n = len(g.brackets)
        # m is the number of brackets of the hypothesized parse
m = g.length - 2
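        # A full binary parse of a length-L sentence has L-1 constituents;
        # dropping the trivial full-span bracket leaves L-2 proposed brackets,
        # so with every gold bracket recovered, precision is n / (L - 2).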
if m > 0:
if self.count_fullspan_bracket:
prec = float(n+1) / float(m+1)
else:
prec = float(n) / float(m)
else:
prec = 1.0
rec = 1.0
return (prec, rec)
def measures2(self, i):
g = self.Gold[i]
n = len(g.brackets)
m = g.length - 2
if self.count_fullspan_bracket:
return (n+1, m+1, n+1)
else:
return (n, m, n)
def main():
print('WSJ10')
main1()
print('NEGRA10')
main2()
print('CAST3LB10')
main3()
def main1():
from . import wsj10
tb = wsj10.WSJ10()
m = UBound(tb)
m.eval()
return m
def main2():
from . import negra10
tb = negra10.Negra10()
tb.simplify_tags()
m = UBound(tb)
m.eval()
return m
def main3():
from . import cast3lb10
tb = cast3lb10.Cast3LB10()
tb.simplify_tags()
m = UBound(tb)
m.eval()
return m
# OLD:
"""wsj10 = wsj.get_wsj10_treebank()
# Recall is 1, obviously.
p = 0.0
r = 1.0
brackets_ok = 0
brackets_parse = 0
brackets_gold = 0
# Number of trees:
m = 0
for t in wsj10:
n = len(t.leaves())
if n >= 3:
m = m+1
# print str(m)+"-esima frase..."
s = t.spannings(leaves=False,root=False,unary=False)
precision = float(len(s)) / float(n-2)
brackets_parse += n-2
brackets_gold += len(s)
p = p + precision
p = p / float(m)
print "Cantidad de arboles:", m
print "Medidas promediando p y r por frase:"
print " Precision de UBOUND:", p
print " Recall de UBOUND:", r
print " Media harmonica F1:", 2*(p*r)/(p+r)
p = float(brackets_gold) / float(brackets_parse)
print "Medidas sumando todos los brackets:"
print " Precision de UBOUND:", p
print " Recall de UBOUND:", r
print " Media harmonica F1:", 2*(p*r)/(p+r)"""
# Number of trees: 7056
# Measures averaging p and r per sentence:
# Precision of UBOUND: 0.740901529262
# Recall of UBOUND: 1.0
# Harmonic mean F1: 0.851169944777
# Measures summing over all brackets:
# Precision of UBOUND: 0.747252747253
# Recall of UBOUND: 1.0
# Harmonic mean F1: 0.85534591195
# Attempt at using eval, which I gave up on before failing:
# (I should write a binarize and have the parse be that)
"""import eval
wsj10 = wsj.get_wsj10_treebank()
Gold = []
Parse = []
for t in wsj10:
if len(t.leaves()) >= 3:
g = t.spannings(leaves=False,root=False)"""
# Debugging:
"""
To keep going through trees until finding the one that gives precision > 1 (UBOUND):
from wsj import *
l = []
m = 0
for e in get_treebank_iterator():
e.filter_tags()
n = len(e.leaves())
if n <= 10:
m = m+1
print str(m)+"-esima frase..."
l = l + [t]
        # Count the spans that coincide non-trivially with rbranch:
s = e.spannings(leaves=False)
s.remove((0,n))
if len(s) > float(n-2):
break
"""
| # Copyright (C) 2007-2011 <NAME>
# URL: <http://www.cs.famaf.unc.edu.ar/~francolq/>
# For license information, see LICENSE.txt
#!/usr/bin/python
# Calculo de precision y recall para el topline UBOUND
"""
WSJ10
Cantidad de arboles: 7422.0
Medidas sumando todos los brackets:
Precision: 78.8
Recall: 100.0
Media harmonica F1: 88.1
NEGRA10
Cantidad de arboles: 7537.0
Medidas sumando todos los brackets:
Precision: 56.4
Recall: 100.0
Media harmonica F1: 72.1
CAST3LB10
Cantidad de arboles: 712.0
Medidas sumando todos los brackets:
Precision: 70.1
Recall: 100.0
Media harmonica F1: 82.4
"""
from . import model, bracketing
class UBound(model.BracketingModel):
trained = True
tested = True
def __init__(self, treebank):
self.Gold = [bracketing.tree_to_bracketing(t) for t in treebank.trees]
# FIXME: no esta bien adaptado para usar count_fullspan_bracket
def measures(self, i):
g = self.Gold[i]
n = len(g.brackets)
# m es la cant. de brackets del supuesto parse
m = g.length - 2
if m > 0:
if self.count_fullspan_bracket:
prec = float(n+1) / float(m+1)
else:
prec = float(n) / float(m)
else:
prec = 1.0
rec = 1.0
return (prec, rec)
def measures2(self, i):
g = self.Gold[i]
n = len(g.brackets)
m = g.length - 2
if self.count_fullspan_bracket:
return (n+1, m+1, n+1)
else:
return (n, m, n)
def main():
print('WSJ10')
main1()
print('NEGRA10')
main2()
print('CAST3LB10')
main3()
def main1():
from . import wsj10
tb = wsj10.WSJ10()
m = UBound(tb)
m.eval()
return m
def main2():
from . import negra10
tb = negra10.Negra10()
tb.simplify_tags()
m = UBound(tb)
m.eval()
return m
def main3():
from . import cast3lb10
tb = cast3lb10.Cast3LB10()
tb.simplify_tags()
m = UBound(tb)
m.eval()
return m
# VIEJO:
"""wsj10 = wsj.get_wsj10_treebank()
# Recall es 1, obvio.
p = 0.0
r = 1.0
brackets_ok = 0
brackets_parse = 0
brackets_gold = 0
# Cantidad de arboles:
m = 0
for t in wsj10:
n = len(t.leaves())
if n >= 3:
m = m+1
# print str(m)+"-esima frase..."
s = t.spannings(leaves=False,root=False,unary=False)
precision = float(len(s)) / float(n-2)
brackets_parse += n-2
brackets_gold += len(s)
p = p + precision
p = p / float(m)
print "Cantidad de arboles:", m
print "Medidas promediando p y r por frase:"
print " Precision de UBOUND:", p
print " Recall de UBOUND:", r
print " Media harmonica F1:", 2*(p*r)/(p+r)
p = float(brackets_gold) / float(brackets_parse)
print "Medidas sumando todos los brackets:"
print " Precision de UBOUND:", p
print " Recall de UBOUND:", r
print " Media harmonica F1:", 2*(p*r)/(p+r)"""
# Cantidad de arboles: 7056
# Medidas promediando p y r por frase:
# Precision de UBOUND: 0.740901529262
# Recall de UBOUND: 1.0
# Media harmonica F1: 0.851169944777
# Medidas sumando todos los brackets:
# Precision de UBOUND: 0.747252747253
# Recall de UBOUND: 1.0
# Media harmonica F1: 0.85534591195
# Intento de usar eval del que desisti antes de fracasar:
# (deberia programar un binarize y que el parse sea eso)
"""import eval
wsj10 = wsj.get_wsj10_treebank()
Gold = []
Parse = []
for t in wsj10:
if len(t.leaves()) >= 3:
g = t.spannings(leaves=False,root=False)"""
# Debugging:
"""
Para ir tirando arboles hasta encontrar el que da precision > 1 (UBOUND):
from wsj import *
l = []
m = 0
for e in get_treebank_iterator():
e.filter_tags()
n = len(e.leaves())
if n <= 10:
m = m+1
print str(m)+"-esima frase..."
l = l + [t]
# Cuento los spans que coinciden no trivialmente con rbranch:
s = e.spannings(leaves=False)
s.remove((0,n))
if len(s) > float(n-2):
break
""" | es | 0.233402 | # Copyright (C) 2007-2011 <NAME> # URL: <http://www.cs.famaf.unc.edu.ar/~francolq/> # For license information, see LICENSE.txt #!/usr/bin/python # Calculo de precision y recall para el topline UBOUND WSJ10 Cantidad de arboles: 7422.0 Medidas sumando todos los brackets: Precision: 78.8 Recall: 100.0 Media harmonica F1: 88.1 NEGRA10 Cantidad de arboles: 7537.0 Medidas sumando todos los brackets: Precision: 56.4 Recall: 100.0 Media harmonica F1: 72.1 CAST3LB10 Cantidad de arboles: 712.0 Medidas sumando todos los brackets: Precision: 70.1 Recall: 100.0 Media harmonica F1: 82.4 # FIXME: no esta bien adaptado para usar count_fullspan_bracket # m es la cant. de brackets del supuesto parse # VIEJO: wsj10 = wsj.get_wsj10_treebank() # Recall es 1, obvio. p = 0.0 r = 1.0 brackets_ok = 0 brackets_parse = 0 brackets_gold = 0 # Cantidad de arboles: m = 0 for t in wsj10: n = len(t.leaves()) if n >= 3: m = m+1 # print str(m)+"-esima frase..." s = t.spannings(leaves=False,root=False,unary=False) precision = float(len(s)) / float(n-2) brackets_parse += n-2 brackets_gold += len(s) p = p + precision p = p / float(m) print "Cantidad de arboles:", m print "Medidas promediando p y r por frase:" print " Precision de UBOUND:", p print " Recall de UBOUND:", r print " Media harmonica F1:", 2*(p*r)/(p+r) p = float(brackets_gold) / float(brackets_parse) print "Medidas sumando todos los brackets:" print " Precision de UBOUND:", p print " Recall de UBOUND:", r print " Media harmonica F1:", 2*(p*r)/(p+r) # Cantidad de arboles: 7056 # Medidas promediando p y r por frase: # Precision de UBOUND: 0.740901529262 # Recall de UBOUND: 1.0 # Media harmonica F1: 0.851169944777 # Medidas sumando todos los brackets: # Precision de UBOUND: 0.747252747253 # Recall de UBOUND: 1.0 # Media harmonica F1: 0.85534591195 # Intento de usar eval del que desisti antes de fracasar: # (deberia programar un binarize y que el parse sea eso) import eval wsj10 = wsj.get_wsj10_treebank() Gold = [] Parse = [] for t in wsj10: if len(t.leaves()) >= 3: g = t.spannings(leaves=False,root=False) # Debugging: Para ir tirando arboles hasta encontrar el que da precision > 1 (UBOUND): from wsj import * l = [] m = 0 for e in get_treebank_iterator(): e.filter_tags() n = len(e.leaves()) if n <= 10: m = m+1 print str(m)+"-esima frase..." l = l + [t] # Cuento los spans que coinciden no trivialmente con rbranch: s = e.spannings(leaves=False) s.remove((0,n)) if len(s) > float(n-2): break | 2.526222 | 3 |
xfel/command_line/striping.py | dperl-sol/cctbx_project | 155 | 6630107 | <reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
from six.moves import range
# -*- Mode: Python; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cctbx.xfel.stripe_experiment
#
# Given an LCLS experiment results directory and a trial, group results by
# run group and then distribute each run group's results into subgroups and run
# dials.combine_experiments (optionally with clustering and selecting clusters).
#
from dials.util import show_mail_on_error
from libtbx.phil import parse
from libtbx.utils import Sorry
from libtbx import easy_run
from xfel.util.dials_file_matcher import match_dials_files
from xfel.util.mp import mp_phil_str as multiprocessing_str
from xfel.util.mp import get_submit_command_chooser
import sys
import os, math
import six
multiprocessing_override_str = '''
mp {
use_mpi = False
}
'''
striping_str = '''
striping {
results_dir = None
.type = path
.help = "LCLS results directory containint runs starting with r."
rungroup = None
.type = int
.multiple = True
.help = "Selected rungroups to stripe. If None, all rungroups are accepted."
run = None
.type = str
.multiple = True
.help = "Selected runs to stripe. If None, all runs are accepted."
trial = None
.type = int
.help = "Trial identifier for an XFEL GUI formatted processing trial."
stripe = False
.type = bool
.help = "Enable to select results evenly spaced across each rungroup"
"(stripes) as opposed to contiguous chunks."
chunk_size = 1000
.type = float
.help = "Maximum number of images per chunk or stripe."
respect_rungroup_barriers = True
.type = bool
.help = "Enforce separation by rungroup at time of striping (default)."
"Turn off to allow multiple rungroups to share a detector model."
dry_run = False
.type = bool
.help = "Only set up jobs but do not execute them"
output_folder = None
.type = path
.help = "Path for output data. If None, use current directory"
}
'''
combining_str = '''
combine_experiments {
clustering {
dendrogram = False
.type = bool
.help = "Overrides any multiprocessing parameters to allow interactive"
.help = "run. Clustering dendrograms can only be displayed in this mode."
}
keep_integrated = False
.type = bool
.help = "Combine refined.expt and integrated.refl files."
.help = "If False, ignore integrated.refl files in favor of"
.help = "indexed.refl files in preparation for reintegrating."
include scope dials.command_line.combine_experiments.phil_scope
}
'''
combining_override_str = '''
combine_experiments {
output {
experiments_filename = FILENAME_combined.expt
reflections_filename = FILENAME_combined.refl
delete_shoeboxes = False
}
reference_from_experiment {
detector = 0
}
clustering {
use = True
}
}
'''
# future feature: filter experiments by rmsd after combining/clustering
filtering_str = '''
filtering {
enable = False
}
'''
refinement_str = '''
refinement {
include scope dials.command_line.refine.phil_scope
input {
experiments = None
reflections = None
}
}
'''
refinement_override_str = '''
refinement {
output {
experiments = FILENAME_refined_CLUSTER.expt
reflections = FILENAME_refined_CLUSTER.refl
include_unused_reflections = False
log = FILENAME_refine_CLUSTER.log
debug_log = FILENAME_refine_CLUSTER.debug.log
}
refinement {
parameterisation {
auto_reduction {
action = remove
}
beam {
fix = all
}
}
refinery {
engine = SparseLevMar
}
reflections {
outlier {
algorithm = sauter_poon
minimum_number_of_reflections = 3
separate_experiments = False
separate_panels = False
}
}
}
input {
experiments = FILENAME_combined_CLUSTER.expt
reflections = FILENAME_combined_CLUSTER.refl
}
}
'''
recompute_mosaicity_str = '''
recompute_mosaicity {
include scope xfel.command_line.recompute_mosaicity.phil_scope
input {
experiments = None
reflections = None
}
}
'''
recompute_mosaicity_override_str = '''
recompute_mosaicity {
input {
experiments = FILENAME_refined_CLUSTER.expt
reflections = FILENAME_refined_CLUSTER.refl
}
output {
experiments = FILENAME_refined_CLUSTER.expt
reflections = FILENAME_refined_CLUSTER.refl
}
}
'''
# reintegration after dials refinement
reintegration_str = '''
reintegration {
enable = True
include scope xfel.merging.command_line.mpi_integrate.phil_scope
input {
experiments = None
reflections = None
}
}
'''
reintegration_override_str = '''
reintegration{
dispatch {
step_list = input balance integrate
}
output {
prefix = FILENAME_reintegrated_CLUSTER
save_experiments_and_reflections = True
}
input {
path = .
experiments_suffix = FILENAME_refined_CLUSTER.expt
reflections_suffix = FILENAME_refined_CLUSTER.refl
}
}
'''
# split results and coerce to integration pickle for merging
postprocessing_str = '''
postprocessing {
enable = True
include scope xfel.command_line.frame_extractor.phil_scope
}
'''
postprocessing_override_str = """
postprocessing {
input {
experiments = FILENAME_reintegrated_CLUSTER*.expt
reflections = FILENAME_reintegrated_CLUSTER*.refl
}
output {
filename = FILENAME_CLUSTER_ITER_extracted.refl
dirname = %s
}
}
"""
master_defaults_str = multiprocessing_str + striping_str + combining_str + filtering_str + \
refinement_str + recompute_mosaicity_str + reintegration_str + postprocessing_str
# initialize a master scope from the combined master defaults phil string
master_defaults_scope = parse(master_defaults_str, process_includes=True)
# update master scope with customized and local phil scopes
phil_scope = master_defaults_scope.fetch(parse(postprocessing_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(reintegration_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(recompute_mosaicity_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(refinement_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(combining_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(multiprocessing_override_str, process_includes=True))
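# Note: the working phil_scope above is the master defaults with the
# *_override_str customizations fetched on top; Script later calls fetch_diff()
# against master_defaults_scope, so only those overrides plus any user-supplied
# values end up written into the per-chunk phil files.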
helpstring = """cctbx.xfel.stripe_experiment: parallel processing of an XFEL UI-generated trial.
usage: cctbx.xfel.stripe_experiment striping.results_dir=/path/to/results striping.trial=000
for interactive unit cell clustering, use combine_experiments.clustering.dendrogram=True
"""
def allocate_chunks(results_dir,
trial_no,
rgs_selected=None,
respect_rungroup_barriers=True,
runs_selected=None,
stripe=False,
max_size=1000,
integrated=False):
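  """Collect per-run DIALS results for one trial and divide them into work units.
  Results are grouped by rungroup, or pooled into a single "all" batch when
  respect_rungroup_barriers is False. Within each batch the *_refined.expt files
  and their matching indexed (or integrated, if integrated=True) reflection
  files are either striped evenly across the batch or cut into contiguous chunks
  of at most max_size images. Returns a dict mapping each batch name to a list
  of (experiment_paths, reflection_paths) tuples.
  """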
refl_ending = "_integrated" if integrated else "_indexed"
expt_ending = "_refined.expt"
trial = "%03d" % trial_no
print("processing trial %s" % trial)
if rgs_selected:
rg_condition = lambda rg: rg in rgs_selected
else:
rg_condition = lambda rg: True
rgs = {} # rungroups and associated runs
for run in os.listdir(results_dir):
if runs_selected and run not in runs_selected:
continue
trgs = [trg for trg in os.listdir(os.path.join(results_dir, run))
if (trg[:6] == trial + "_rg") and rg_condition(trg[-5:])]
if not trgs:
continue
rungroups = set([n.split("_")[1] for n in trgs])
for rg in rungroups:
if rg not in rgs:
rgs[rg] = [run]
else:
rgs[rg].append(run)
batch_chunk_nums_sizes = {}
batch_contents = {}
if respect_rungroup_barriers:
batchable = {rg:{rg:runs} for rg, runs in six.iteritems(rgs)}
else:
batchable = {"all":rgs}
# for either grouping, iterate over the top level keys in batchable and
# distribute the events within those "batches" in stripes or chunks
extension = None
for batch, rungroups in six.iteritems(batchable):
rg_by_run = {}
for rungroup, runs in six.iteritems(rungroups):
for run in runs:
rg_by_run[run] = rungroup
n_img = 0
batch_contents[batch] = []
for run, rg in six.iteritems(rg_by_run):
try:
trg = trial + "_" + rg
contents = sorted(os.listdir(os.path.join(results_dir, run, trg, "out")))
except OSError:
print("skipping run %s missing out directory" % run)
continue
abs_contents = [os.path.join(results_dir, run, trg, "out", c)
for c in contents]
batch_contents[batch].extend(abs_contents)
expts = [c for c in contents if c.endswith(expt_ending)]
n_img += len(expts)
if extension is None:
if any(c.endswith(".mpack") for c in contents):
extension = ".mpack"
elif any(c.endswith(".refl") for c in contents):
extension = ".refl"
else:
extension = ".pickle"
if n_img == 0:
print("no images found for %s" % batch)
del batch_contents[batch]
continue
n_chunks = int(math.ceil(n_img/max_size))
chunk_size = int(math.ceil(n_img/n_chunks))
batch_chunk_nums_sizes[batch] = (n_chunks, chunk_size)
if len(batch_contents) == 0:
raise Sorry("no DIALS integration results found.")
refl_ending += extension
batch_chunks = {}
for batch, num_size_tuple in six.iteritems(batch_chunk_nums_sizes):
num, size = num_size_tuple
batch_chunks[batch] = []
contents = batch_contents[batch]
expts = [c for c in contents if c.endswith(expt_ending)]
refls = [c for c in contents if c.endswith(refl_ending)]
expts, refls = match_dials_files(expts, refls, expt_ending, refl_ending)
if stripe:
for i in range(num):
expts_stripe = expts[i::num]
refls_stripe = refls[i::num]
batch_chunks[batch].append((expts_stripe, refls_stripe))
print("striped %d experiments in %s with %d experiments per stripe and %d stripes" % \
(len(expts), batch, len(batch_chunks[batch][0][0]), len(batch_chunks[batch])))
else:
for i in range(num):
expts_chunk = expts[i*size:(i+1)*size]
refls_chunk = refls[i*size:(i+1)*size]
batch_chunks[batch].append((expts_chunk, refls_chunk))
print("chunked %d experiments in %s with %d experiments per chunk and %d chunks" % \
(len(expts), batch, len(batch_chunks[batch][0][0]), len(batch_chunks[batch])))
return batch_chunks
def parse_retaining_scope(args, phil_scope=phil_scope):
if "-c" in args:
phil_scope.show(attributes_level=2)
return
file_phil = []
cmdl_phil = []
for arg in args:
if os.path.isfile(arg):
try:
file_phil.append(parse(file_name=arg))
except Exception as e:
raise Sorry("Unrecognized file: %s" % arg)
else:
try:
cmdl_phil.append(parse(arg))
except Exception as e:
raise Sorry("Unrecognized argument: %s" % arg)
run_scope, unused1 = phil_scope.fetch(sources=file_phil, track_unused_definitions=True)
run_scope, unused2 = run_scope.fetch(sources=cmdl_phil, track_unused_definitions=True)
if any([unused1, unused2]):
msg = "\n".join([str(loc) for loc in unused1 + unused2])
raise Sorry("Unrecognized argument(s): " + msg)
return run_scope
def script_to_expand_over_clusters(clustered_json_name,
phil_template_name, command, location):
"""
Write a bash script to find results of a clustering step and produce customized
phils and commands to run with each of them. For example, run the command
dials.refine ...cluster8.expt ...cluster8.refl ...cluster8.phil followed by
dials.refine ...cluster9.expt ...cluster9.refl ...cluster9.phil.
  clustered_json_name and phil_template_name must each contain the substring
  CLUSTER; the generated script globs for the clustered experiment files and,
  for each cluster id found, substitutes it for CLUSTER in the phil template
  before running the given command on the resulting phil file.
"""
clj_part_first, clj_part_last = clustered_json_name.split("CLUSTER")
clustered_template_name = clj_part_first + "*" + clj_part_last
ph_part_first, ph_part_last = phil_template_name.split("CLUSTER")
bash_str = '''
#! /bin/sh
for file in `ls {clname}`
do export cluster=`echo $file | sed "s:{cljfirst}::; s:{cljlast}::"`
export philname="{phfirst}${cluster}{phlast}"
export outname=`echo $philname | sed "s:.phil:.out:"`
sed "s:CLUSTER:${cluster}:g" {phtempl} > $philname
{command} $philname > $outname
done
'''.format(clname=clustered_template_name, phtempl=phil_template_name,
cljfirst=clj_part_first, cljlast=clj_part_last,
phfirst=ph_part_first, phlast=ph_part_last,
command=command, cluster="{cluster}")
bash_name = "generator".join([ph_part_first, ph_part_last]).split(".phil")[0] + ".sh"
with open(os.path.join(location, bash_name), "wb") as script:
    script.write(bash_str.encode())
return bash_name
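# The generated script is a thin expansion loop: for every file matching the
# CLUSTER-globbed experiment name it extracts the cluster id, writes a phil file
# with CLUSTER replaced by that id, and redirects "<command> <phil>" into a
# matching .out file. The actual cluster ids depend on what the clustering step
# wrote to disk.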
class Script(object):
def __init__(self, args = None):
'''Initialise the script.'''
# The script usage
self.master_defaults_scope = master_defaults_scope
if args is None: args = sys.argv[1:]
self.run_scope = parse_retaining_scope(args)
self.diff_scope = self.master_defaults_scope.fetch_diff(self.run_scope)
self.params = self.run_scope.extract()
# Validation
if self.params.reintegration.enable:
if self.params.combine_experiments.output.delete_shoeboxes:
raise Sorry("Keep shoeboxes during combine_experiments and joint refinement when reintegrating."+
"Set combine_experiments.output.delete_shoeboxes = False when using reintegration.")
# Setup
self.clustering = self.params.combine_experiments.clustering.use
def set_up_section(self, section_tag, dispatcher_name,
clustering=False, custom_parts=None, lambda_diff_str=None):
diff_str = self.diff_scope.get(section_tag).as_str().replace("FILENAME", self.filename)
if lambda_diff_str is not None:
diff_str = lambda_diff_str(diff_str)
if not clustering:
diff_str = diff_str.replace("_CLUSTER", "")
diff_parts = diff_str.split("\n")[1:-2]
if custom_parts is not None:
for part in custom_parts:
diff_parts.append(part)
diff_str = "\n".join(diff_parts)
phil_filename = "%s_%s_CLUSTER.phil" % (self.filename, section_tag) if clustering else \
"%s_%s.phil" % (self.filename, section_tag)
phil_path = os.path.join(self.params.striping.output_folder, self.intermediates, phil_filename)
if os.path.isfile(phil_path):
os.remove(phil_path)
with open(phil_path, "wb") as phil_outfile:
phil_outfile.write(diff_str.encode() + b"\n")
if clustering:
script = script_to_expand_over_clusters(
self.params.refinement.input.experiments[0].replace("FILENAME", self.filename),
phil_filename,
dispatcher_name,
self.intermediates)
command = ". %s" % os.path.join(self.params.striping.output_folder, self.intermediates, script)
else:
command = "%s %s" % (dispatcher_name, phil_filename)
self.command_sequence.append(command)
def run(self):
'''Execute the script.'''
runs = ["r%04d" % int(r) if r.isnumeric() else r for r in self.params.striping.run]
if self.params.striping.run:
print("processing runs " + ", ".join(runs))
if self.params.striping.rungroup:
print("processing rungroups " + ", ".join(["rg%03d" % rg for rg in self.params.striping.rungroup]))
batch_chunks = allocate_chunks(self.params.striping.results_dir,
self.params.striping.trial,
rgs_selected=["rg%03d" % rg for rg in self.params.striping.rungroup],
respect_rungroup_barriers=self.params.striping.respect_rungroup_barriers,
runs_selected=runs,
stripe=self.params.striping.stripe,
max_size=self.params.striping.chunk_size,
integrated=self.params.combine_experiments.keep_integrated)
    if self.params.striping.output_folder is None:
      self.params.striping.output_folder = os.getcwd()
    self.dirname = os.path.join(self.params.striping.output_folder, "combine_experiments_t%03d" % self.params.striping.trial)
    self.intermediates = os.path.join(self.dirname, "intermediates")
    self.extracted = os.path.join(self.dirname, "final_extracted")
    for d in self.dirname, self.intermediates, self.extracted:
      if not os.path.isdir(d):
        os.mkdir(d)
tag = "stripe" if self.params.striping.stripe else "chunk"
all_commands = []
for batch, ch_list in six.iteritems(batch_chunks):
for idx in range(len(ch_list)):
chunk = ch_list[idx]
# reset for this chunk/stripe
self.filename = "t%03d_%s_%s%03d" % (self.params.striping.trial, batch, tag, idx)
self.command_sequence = []
# set up the file containing input expts and refls (logging)
chunk_path = os.path.join(self.params.striping.output_folder, self.intermediates, self.filename)
if os.path.isfile(chunk_path):
os.remove(chunk_path)
with open(chunk_path, "wb") as outfile:
for i in (0, 1): # expts then refls
outfile.write(("\n".join(chunk[i]) + "\n").encode())
# set up the params for dials.combine_experiments
custom_parts = [" input {"]
for expt_path in chunk[0]:
custom_parts.append(" experiments = %s" % expt_path)
for refl_path in chunk[1]:
custom_parts.append(" reflections = %s" % refl_path)
custom_parts.append(" }")
self.set_up_section("combine_experiments", "dials.combine_experiments",
clustering=False, custom_parts=custom_parts)
# refinement of the grouped experiments
self.set_up_section("refinement", "dials.refine",
clustering=self.clustering)
# refinement of the grouped experiments
self.set_up_section("recompute_mosaicity", "cctbx.xfel.recompute_mosaicity",
clustering=self.clustering)
# reintegration
if self.params.reintegration.enable:
if self.params.mp.method == 'shifter' or not self.params.mp.mpi_command:
self.set_up_section("reintegration", "cctbx.xfel.mpi_integrate", clustering=self.clustering)
else:
self.set_up_section("reintegration", "%s cctbx.xfel.mpi_integrate"%self.params.mp.mpi_command,
clustering=self.clustering)
# extract results to integration pickles for merging
if self.params.postprocessing.enable:
lambda_diff_str = lambda diff_str: (diff_str % \
(os.path.join("..", "final_extracted"))).replace("ITER", "%04d")
self.set_up_section("postprocessing", "cctbx.xfel.frame_extractor",
lambda_diff_str=lambda_diff_str, clustering=self.clustering)
# submit queued job from appropriate directory
os.chdir(self.intermediates)
command = " && ".join(self.command_sequence)
if self.params.combine_experiments.clustering.dendrogram:
easy_run.fully_buffered(command).raise_if_errors().show_stdout()
else:
submit_folder = os.path.join(self.params.striping.output_folder, self.intermediates)
submit_path = os.path.join(submit_folder, "combine_%s.sh" % self.filename)
submit_command = get_submit_command_chooser(command, submit_path, self.intermediates, self.params.mp,
log_name=os.path.splitext(os.path.basename(submit_path))[0] + ".out",
err_name=os.path.splitext(os.path.basename(submit_path))[0] + ".err",
root_dir = submit_folder)
all_commands.append(submit_command)
if not self.params.striping.dry_run:
print("executing command: %s" % submit_command)
try:
easy_run.fully_buffered(submit_command).raise_if_errors().show_stdout()
except Exception as e:
if not "Warning: job being submitted without an AFS token." in str(e):
raise e
return all_commands
if __name__ == "__main__":
import sys
if "-h" in sys.argv[1:] or "--help" in sys.argv[1:]:
print(helpstring)
exit()
if "-c" in sys.argv[1:]:
expert_level = int(sys.argv[sys.argv.index("-e") + 1]) if "-e" in sys.argv[1:] else 0
attr_level = int(sys.argv[sys.argv.index("-a") + 1]) if "-a" in sys.argv[1:] else 0
phil_scope.show(expert_level=expert_level, attributes_level=attr_level)
with open("striping_defaults.phil", "wb") as defaults:
      defaults.write(phil_scope.as_str().encode())
exit()
with show_mail_on_error():
script = Script()
script.run()
| from __future__ import absolute_import, division, print_function
from six.moves import range
# -*- Mode: Python; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cctbx.xfel.stripe_experiment
#
# Given an LCLS experiment results directory and a trial, group results by
# run group and then distribute each run group's results into subgroups and run
# dials.combine_experiments (optionally with clustering and selecting clusters).
#
from dials.util import show_mail_on_error
from libtbx.phil import parse
from libtbx.utils import Sorry
from libtbx import easy_run
from xfel.util.dials_file_matcher import match_dials_files
from xfel.util.mp import mp_phil_str as multiprocessing_str
from xfel.util.mp import get_submit_command_chooser
import sys
import os, math
import six
multiprocessing_override_str = '''
mp {
use_mpi = False
}
'''
striping_str = '''
striping {
results_dir = None
.type = path
.help = "LCLS results directory containint runs starting with r."
rungroup = None
.type = int
.multiple = True
.help = "Selected rungroups to stripe. If None, all rungroups are accepted."
run = None
.type = str
.multiple = True
.help = "Selected runs to stripe. If None, all runs are accepted."
trial = None
.type = int
.help = "Trial identifier for an XFEL GUI formatted processing trial."
stripe = False
.type = bool
.help = "Enable to select results evenly spaced across each rungroup"
"(stripes) as opposed to contiguous chunks."
chunk_size = 1000
.type = float
.help = "Maximum number of images per chunk or stripe."
respect_rungroup_barriers = True
.type = bool
.help = "Enforce separation by rungroup at time of striping (default)."
"Turn off to allow multiple rungroups to share a detector model."
dry_run = False
.type = bool
.help = "Only set up jobs but do not execute them"
output_folder = None
.type = path
.help = "Path for output data. If None, use current directory"
}
'''
combining_str = '''
combine_experiments {
clustering {
dendrogram = False
.type = bool
.help = "Overrides any multiprocessing parameters to allow interactive"
.help = "run. Clustering dendrograms can only be displayed in this mode."
}
keep_integrated = False
.type = bool
.help = "Combine refined.expt and integrated.refl files."
.help = "If False, ignore integrated.refl files in favor of"
.help = "indexed.refl files in preparation for reintegrating."
include scope dials.command_line.combine_experiments.phil_scope
}
'''
combining_override_str = '''
combine_experiments {
output {
experiments_filename = FILENAME_combined.expt
reflections_filename = FILENAME_combined.refl
delete_shoeboxes = False
}
reference_from_experiment {
detector = 0
}
clustering {
use = True
}
}
'''
# future feature: filter experiments by rmsd after combining/clustering
filtering_str = '''
filtering {
enable = False
}
'''
refinement_str = '''
refinement {
include scope dials.command_line.refine.phil_scope
input {
experiments = None
reflections = None
}
}
'''
refinement_override_str = '''
refinement {
output {
experiments = FILENAME_refined_CLUSTER.expt
reflections = FILENAME_refined_CLUSTER.refl
include_unused_reflections = False
log = FILENAME_refine_CLUSTER.log
debug_log = FILENAME_refine_CLUSTER.debug.log
}
refinement {
parameterisation {
auto_reduction {
action = remove
}
beam {
fix = all
}
}
refinery {
engine = SparseLevMar
}
reflections {
outlier {
algorithm = sauter_poon
minimum_number_of_reflections = 3
separate_experiments = False
separate_panels = False
}
}
}
input {
experiments = FILENAME_combined_CLUSTER.expt
reflections = FILENAME_combined_CLUSTER.refl
}
}
'''
recompute_mosaicity_str = '''
recompute_mosaicity {
include scope xfel.command_line.recompute_mosaicity.phil_scope
input {
experiments = None
reflections = None
}
}
'''
recompute_mosaicity_override_str = '''
recompute_mosaicity {
input {
experiments = FILENAME_refined_CLUSTER.expt
reflections = FILENAME_refined_CLUSTER.refl
}
output {
experiments = FILENAME_refined_CLUSTER.expt
reflections = FILENAME_refined_CLUSTER.refl
}
}
'''
# reintegration after dials refinement
reintegration_str = '''
reintegration {
enable = True
include scope xfel.merging.command_line.mpi_integrate.phil_scope
input {
experiments = None
reflections = None
}
}
'''
reintegration_override_str = '''
reintegration{
dispatch {
step_list = input balance integrate
}
output {
prefix = FILENAME_reintegrated_CLUSTER
save_experiments_and_reflections = True
}
input {
path = .
experiments_suffix = FILENAME_refined_CLUSTER.expt
reflections_suffix = FILENAME_refined_CLUSTER.refl
}
}
'''
# split results and coerce to integration pickle for merging
postprocessing_str = '''
postprocessing {
enable = True
include scope xfel.command_line.frame_extractor.phil_scope
}
'''
postprocessing_override_str = """
postprocessing {
input {
experiments = FILENAME_reintegrated_CLUSTER*.expt
reflections = FILENAME_reintegrated_CLUSTER*.refl
}
output {
filename = FILENAME_CLUSTER_ITER_extracted.refl
dirname = %s
}
}
"""
master_defaults_str = multiprocessing_str + striping_str + combining_str + filtering_str + \
refinement_str + recompute_mosaicity_str + reintegration_str + postprocessing_str
# initialize a master scope from the multiprocessing phil string
master_defaults_scope = parse(master_defaults_str, process_includes=True)
# update master scope with customized and local phil scopes
phil_scope = master_defaults_scope.fetch(parse(postprocessing_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(reintegration_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(recompute_mosaicity_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(refinement_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(combining_override_str, process_includes=True))
phil_scope = phil_scope.fetch(parse(multiprocessing_override_str, process_includes=True))
helpstring = """cctbx.xfel.stripe_experiment: parallel processing of an XFEL UI-generated trial.
usage: cctbx.xfel.stripe_experiment striping.results_dir=/path/to/results striping.trial=000
for interactive unit cell clustering, use combine_experiments.clustering.dendrogram=True
"""
def allocate_chunks(results_dir,
trial_no,
rgs_selected=None,
respect_rungroup_barriers=True,
runs_selected=None,
stripe=False,
max_size=1000,
integrated=False):
refl_ending = "_integrated" if integrated else "_indexed"
expt_ending = "_refined.expt"
trial = "%03d" % trial_no
print("processing trial %s" % trial)
if rgs_selected:
rg_condition = lambda rg: rg in rgs_selected
else:
rg_condition = lambda rg: True
rgs = {} # rungroups and associated runs
for run in os.listdir(results_dir):
if runs_selected and run not in runs_selected:
continue
trgs = [trg for trg in os.listdir(os.path.join(results_dir, run))
if (trg[:6] == trial + "_rg") and rg_condition(trg[-5:])]
if not trgs:
continue
rungroups = set([n.split("_")[1] for n in trgs])
for rg in rungroups:
if rg not in rgs:
rgs[rg] = [run]
else:
rgs[rg].append(run)
batch_chunk_nums_sizes = {}
batch_contents = {}
if respect_rungroup_barriers:
batchable = {rg:{rg:runs} for rg, runs in six.iteritems(rgs)}
else:
batchable = {"all":rgs}
# for either grouping, iterate over the top level keys in batchable and
# distribute the events within those "batches" in stripes or chunks
extension = None
for batch, rungroups in six.iteritems(batchable):
rg_by_run = {}
for rungroup, runs in six.iteritems(rungroups):
for run in runs:
rg_by_run[run] = rungroup
n_img = 0
batch_contents[batch] = []
for run, rg in six.iteritems(rg_by_run):
try:
trg = trial + "_" + rg
contents = sorted(os.listdir(os.path.join(results_dir, run, trg, "out")))
except OSError:
print("skipping run %s missing out directory" % run)
continue
abs_contents = [os.path.join(results_dir, run, trg, "out", c)
for c in contents]
batch_contents[batch].extend(abs_contents)
expts = [c for c in contents if c.endswith(expt_ending)]
n_img += len(expts)
if extension is None:
if any(c.endswith(".mpack") for c in contents):
extension = ".mpack"
elif any(c.endswith(".refl") for c in contents):
extension = ".refl"
else:
extension = ".pickle"
if n_img == 0:
print("no images found for %s" % batch)
del batch_contents[batch]
continue
n_chunks = int(math.ceil(n_img/max_size))
chunk_size = int(math.ceil(n_img/n_chunks))
batch_chunk_nums_sizes[batch] = (n_chunks, chunk_size)
if len(batch_contents) == 0:
raise Sorry("no DIALS integration results found.")
refl_ending += extension
batch_chunks = {}
for batch, num_size_tuple in six.iteritems(batch_chunk_nums_sizes):
num, size = num_size_tuple
batch_chunks[batch] = []
contents = batch_contents[batch]
expts = [c for c in contents if c.endswith(expt_ending)]
refls = [c for c in contents if c.endswith(refl_ending)]
expts, refls = match_dials_files(expts, refls, expt_ending, refl_ending)
if stripe:
for i in range(num):
expts_stripe = expts[i::num]
refls_stripe = refls[i::num]
batch_chunks[batch].append((expts_stripe, refls_stripe))
print("striped %d experiments in %s with %d experiments per stripe and %d stripes" % \
(len(expts), batch, len(batch_chunks[batch][0][0]), len(batch_chunks[batch])))
else:
for i in range(num):
expts_chunk = expts[i*size:(i+1)*size]
refls_chunk = refls[i*size:(i+1)*size]
batch_chunks[batch].append((expts_chunk, refls_chunk))
print("chunked %d experiments in %s with %d experiments per chunk and %d chunks" % \
(len(expts), batch, len(batch_chunks[batch][0][0]), len(batch_chunks[batch])))
return batch_chunks
def parse_retaining_scope(args, phil_scope=phil_scope):
if "-c" in args:
phil_scope.show(attributes_level=2)
return
file_phil = []
cmdl_phil = []
for arg in args:
if os.path.isfile(arg):
try:
file_phil.append(parse(file_name=arg))
except Exception as e:
raise Sorry("Unrecognized file: %s" % arg)
else:
try:
cmdl_phil.append(parse(arg))
except Exception as e:
raise Sorry("Unrecognized argument: %s" % arg)
run_scope, unused1 = phil_scope.fetch(sources=file_phil, track_unused_definitions=True)
run_scope, unused2 = run_scope.fetch(sources=cmdl_phil, track_unused_definitions=True)
if any([unused1, unused2]):
msg = "\n".join([str(loc) for loc in unused1 + unused2])
raise Sorry("Unrecognized argument(s): " + msg)
return run_scope
def script_to_expand_over_clusters(clustered_json_name,
phil_template_name, command, location):
"""
Write a bash script to find results of a clustering step and produce customized
phils and commands to run with each of them. For example, run the command
dials.refine ...cluster8.expt ...cluster8.refl ...cluster8.phil followed by
dials.refine ...cluster9.expt ...cluster9.refl ...cluster9.phil.
clustered_json_name, clustered_refl_name and phil_template_name must each
contain an asterisk, and substitution in phil_template itself will occur at
each instance of CLUSTER.
"""
clj_part_first, clj_part_last = clustered_json_name.split("CLUSTER")
clustered_template_name = clj_part_first + "*" + clj_part_last
ph_part_first, ph_part_last = phil_template_name.split("CLUSTER")
bash_str = '''
#! /bin/sh
for file in `ls {clname}`
do export cluster=`echo $file | sed "s:{cljfirst}::; s:{cljlast}::"`
export philname="{phfirst}${cluster}{phlast}"
export outname=`echo $philname | sed "s:.phil:.out:"`
sed "s:CLUSTER:${cluster}:g" {phtempl} > $philname
{command} $philname > $outname
done
'''.format(clname=clustered_template_name, phtempl=phil_template_name,
cljfirst=clj_part_first, cljlast=clj_part_last,
phfirst=ph_part_first, phlast=ph_part_last,
command=command, cluster="{cluster}")
bash_name = "generator".join([ph_part_first, ph_part_last]).split(".phil")[0] + ".sh"
with open(os.path.join(location, bash_name), "wb") as script:
    script.write(bash_str.encode())
return bash_name
class Script(object):
def __init__(self, args = None):
'''Initialise the script.'''
# The script usage
self.master_defaults_scope = master_defaults_scope
if args is None: args = sys.argv[1:]
self.run_scope = parse_retaining_scope(args)
self.diff_scope = self.master_defaults_scope.fetch_diff(self.run_scope)
self.params = self.run_scope.extract()
# Validation
if self.params.reintegration.enable:
if self.params.combine_experiments.output.delete_shoeboxes:
raise Sorry("Keep shoeboxes during combine_experiments and joint refinement when reintegrating."+
"Set combine_experiments.output.delete_shoeboxes = False when using reintegration.")
# Setup
self.clustering = self.params.combine_experiments.clustering.use
def set_up_section(self, section_tag, dispatcher_name,
clustering=False, custom_parts=None, lambda_diff_str=None):
diff_str = self.diff_scope.get(section_tag).as_str().replace("FILENAME", self.filename)
if lambda_diff_str is not None:
diff_str = lambda_diff_str(diff_str)
if not clustering:
diff_str = diff_str.replace("_CLUSTER", "")
diff_parts = diff_str.split("\n")[1:-2]
if custom_parts is not None:
for part in custom_parts:
diff_parts.append(part)
diff_str = "\n".join(diff_parts)
phil_filename = "%s_%s_CLUSTER.phil" % (self.filename, section_tag) if clustering else \
"%s_%s.phil" % (self.filename, section_tag)
phil_path = os.path.join(self.params.striping.output_folder, self.intermediates, phil_filename)
if os.path.isfile(phil_path):
os.remove(phil_path)
with open(phil_path, "wb") as phil_outfile:
phil_outfile.write(diff_str.encode() + b"\n")
if clustering:
script = script_to_expand_over_clusters(
self.params.refinement.input.experiments[0].replace("FILENAME", self.filename),
phil_filename,
dispatcher_name,
self.intermediates)
command = ". %s" % os.path.join(self.params.striping.output_folder, self.intermediates, script)
else:
command = "%s %s" % (dispatcher_name, phil_filename)
self.command_sequence.append(command)
def run(self):
'''Execute the script.'''
runs = ["r%04d" % int(r) if r.isnumeric() else r for r in self.params.striping.run]
if self.params.striping.run:
print("processing runs " + ", ".join(runs))
if self.params.striping.rungroup:
print("processing rungroups " + ", ".join(["rg%03d" % rg for rg in self.params.striping.rungroup]))
batch_chunks = allocate_chunks(self.params.striping.results_dir,
self.params.striping.trial,
rgs_selected=["rg%03d" % rg for rg in self.params.striping.rungroup],
respect_rungroup_barriers=self.params.striping.respect_rungroup_barriers,
runs_selected=runs,
stripe=self.params.striping.stripe,
max_size=self.params.striping.chunk_size,
integrated=self.params.combine_experiments.keep_integrated)
self.dirname = os.path.join(self.params.striping.output_folder, "combine_experiments_t%03d" % self.params.striping.trial)
self.intermediates = os.path.join(self.dirname, "intermediates")
self.extracted = os.path.join(self.dirname, "final_extracted")
for d in self.dirname, self.intermediates, self.extracted:
if not os.path.isdir(d):
os.mkdir(d)
if self.params.striping.output_folder is None:
self.params.striping.output_folder = os.getcwd()
tag = "stripe" if self.params.striping.stripe else "chunk"
all_commands = []
for batch, ch_list in six.iteritems(batch_chunks):
for idx in range(len(ch_list)):
chunk = ch_list[idx]
# reset for this chunk/stripe
self.filename = "t%03d_%s_%s%03d" % (self.params.striping.trial, batch, tag, idx)
self.command_sequence = []
# set up the file containing input expts and refls (logging)
chunk_path = os.path.join(self.params.striping.output_folder, self.intermediates, self.filename)
if os.path.isfile(chunk_path):
os.remove(chunk_path)
with open(chunk_path, "wb") as outfile:
for i in (0, 1): # expts then refls
outfile.write(("\n".join(chunk[i]) + "\n").encode())
# set up the params for dials.combine_experiments
custom_parts = [" input {"]
for expt_path in chunk[0]:
custom_parts.append(" experiments = %s" % expt_path)
for refl_path in chunk[1]:
custom_parts.append(" reflections = %s" % refl_path)
custom_parts.append(" }")
self.set_up_section("combine_experiments", "dials.combine_experiments",
clustering=False, custom_parts=custom_parts)
# refinement of the grouped experiments
self.set_up_section("refinement", "dials.refine",
clustering=self.clustering)
        # recompute mosaicity of the refined experiments
self.set_up_section("recompute_mosaicity", "cctbx.xfel.recompute_mosaicity",
clustering=self.clustering)
# reintegration
if self.params.reintegration.enable:
if self.params.mp.method == 'shifter' or not self.params.mp.mpi_command:
self.set_up_section("reintegration", "cctbx.xfel.mpi_integrate", clustering=self.clustering)
else:
self.set_up_section("reintegration", "%s cctbx.xfel.mpi_integrate"%self.params.mp.mpi_command,
clustering=self.clustering)
# extract results to integration pickles for merging
if self.params.postprocessing.enable:
lambda_diff_str = lambda diff_str: (diff_str % \
(os.path.join("..", "final_extracted"))).replace("ITER", "%04d")
self.set_up_section("postprocessing", "cctbx.xfel.frame_extractor",
lambda_diff_str=lambda_diff_str, clustering=self.clustering)
# submit queued job from appropriate directory
os.chdir(self.intermediates)
command = " && ".join(self.command_sequence)
if self.params.combine_experiments.clustering.dendrogram:
easy_run.fully_buffered(command).raise_if_errors().show_stdout()
else:
submit_folder = os.path.join(self.params.striping.output_folder, self.intermediates)
submit_path = os.path.join(submit_folder, "combine_%s.sh" % self.filename)
submit_command = get_submit_command_chooser(command, submit_path, self.intermediates, self.params.mp,
log_name=os.path.splitext(os.path.basename(submit_path))[0] + ".out",
err_name=os.path.splitext(os.path.basename(submit_path))[0] + ".err",
root_dir = submit_folder)
all_commands.append(submit_command)
if not self.params.striping.dry_run:
print("executing command: %s" % submit_command)
try:
easy_run.fully_buffered(submit_command).raise_if_errors().show_stdout()
except Exception as e:
if not "Warning: job being submitted without an AFS token." in str(e):
raise e
return all_commands
if __name__ == "__main__":
import sys
if "-h" in sys.argv[1:] or "--help" in sys.argv[1:]:
print(helpstring)
exit()
if "-c" in sys.argv[1:]:
expert_level = int(sys.argv[sys.argv.index("-e") + 1]) if "-e" in sys.argv[1:] else 0
attr_level = int(sys.argv[sys.argv.index("-a") + 1]) if "-a" in sys.argv[1:] else 0
phil_scope.show(expert_level=expert_level, attributes_level=attr_level)
with open("striping_defaults.phil", "wb") as defaults:
      defaults.write(phil_scope.as_str().encode())
exit()
with show_mail_on_error():
script = Script()
script.run() | en | 0.639844 | # -*- Mode: Python; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*- # # LIBTBX_SET_DISPATCHER_NAME cctbx.xfel.stripe_experiment # # Given an LCLS experiment results directory and a trial, group results by # run group and then distrbute each run group's results into subgroups and run # dials.combine_experiments (optionally with clustering and selecting clusters). # mp { use_mpi = False } striping { results_dir = None .type = path .help = "LCLS results directory containint runs starting with r." rungroup = None .type = int .multiple = True .help = "Selected rungroups to stripe. If None, all rungroups are accepted." run = None .type = str .multiple = True .help = "Selected runs to stripe. If None, all runs are accepted." trial = None .type = int .help = "Trial identifier for an XFEL GUI formatted processing trial." stripe = False .type = bool .help = "Enable to select results evenly spaced across each rungroup" "(stripes) as opposed to contiguous chunks." chunk_size = 1000 .type = float .help = "Maximum number of images per chunk or stripe." respect_rungroup_barriers = True .type = bool .help = "Enforce separation by rungroup at time of striping (default)." "Turn off to allow multiple rungroups to share a detector model." dry_run = False .type = bool .help = "Only set up jobs but do not execute them" output_folder = None .type = path .help = "Path for output data. If None, use current directory" } combine_experiments { clustering { dendrogram = False .type = bool .help = "Overrides any multiprocessing parameters to allow interactive" .help = "run. Clustering dendrograms can only be displayed in this mode." } keep_integrated = False .type = bool .help = "Combine refined.expt and integrated.refl files." .help = "If False, ignore integrated.refl files in favor of" .help = "indexed.refl files in preparation for reintegrating." 
include scope dials.command_line.combine_experiments.phil_scope } combine_experiments { output { experiments_filename = FILENAME_combined.expt reflections_filename = FILENAME_combined.refl delete_shoeboxes = False } reference_from_experiment { detector = 0 } clustering { use = True } } # future feature: filter experiments by rmsd after combining/clustering filtering { enable = False } refinement { include scope dials.command_line.refine.phil_scope input { experiments = None reflections = None } } refinement { output { experiments = FILENAME_refined_CLUSTER.expt reflections = FILENAME_refined_CLUSTER.refl include_unused_reflections = False log = FILENAME_refine_CLUSTER.log debug_log = FILENAME_refine_CLUSTER.debug.log } refinement { parameterisation { auto_reduction { action = remove } beam { fix = all } } refinery { engine = SparseLevMar } reflections { outlier { algorithm = sauter_poon minimum_number_of_reflections = 3 separate_experiments = False separate_panels = False } } } input { experiments = FILENAME_combined_CLUSTER.expt reflections = FILENAME_combined_CLUSTER.refl } } recompute_mosaicity { include scope xfel.command_line.recompute_mosaicity.phil_scope input { experiments = None reflections = None } } recompute_mosaicity { input { experiments = FILENAME_refined_CLUSTER.expt reflections = FILENAME_refined_CLUSTER.refl } output { experiments = FILENAME_refined_CLUSTER.expt reflections = FILENAME_refined_CLUSTER.refl } } # reintegration after dials refinement reintegration { enable = True include scope xfel.merging.command_line.mpi_integrate.phil_scope input { experiments = None reflections = None } } reintegration{ dispatch { step_list = input balance integrate } output { prefix = FILENAME_reintegrated_CLUSTER save_experiments_and_reflections = True } input { path = . experiments_suffix = FILENAME_refined_CLUSTER.expt reflections_suffix = FILENAME_refined_CLUSTER.refl } } # split results and coerce to integration pickle for merging postprocessing { enable = True include scope xfel.command_line.frame_extractor.phil_scope } postprocessing { input { experiments = FILENAME_reintegrated_CLUSTER*.expt reflections = FILENAME_reintegrated_CLUSTER*.refl } output { filename = FILENAME_CLUSTER_ITER_extracted.refl dirname = %s } } # initialize a master scope from the multiprocessing phil string # update master scope with customized and local phil scopes cctbx.xfel.stripe_experiment: parallel processing of an XFEL UI-generated trial. usage: cctbx.xfel.stripe_experiment striping.results_dir=/path/to/results striping.trial=000 for interactive unit cell clustering, use combine_experiments.clustering.dendrogram=True # rungroups and associated runs # for either grouping, iterate over the top level keys in batchable and # distribute the events within those "batches" in stripes or chunks Write a bash script to find results of a clustering step and produce customized phils and commands to run with each of them. For example, run the command dials.refine ...cluster8.expt ...cluster8.refl ...cluster8.phil followed by dials.refine ...cluster9.expt ...cluster9.refl ...cluster9.phil. clustered_json_name, clustered_refl_name and phil_template_name must each contain an asterisk, and substitution in phil_template itself will occur at each instance of CLUSTER. #! 
/bin/sh for file in `ls {clname}` do export cluster=`echo $file | sed "s:{cljfirst}::; s:{cljlast}::"` export philname="{phfirst}${cluster}{phlast}" export outname=`echo $philname | sed "s:.phil:.out:"` sed "s:CLUSTER:${cluster}:g" {phtempl} > $philname {command} $philname > $outname done Initialise the script. # The script usage # Validation # Setup Execute the script. # reset for this chunk/stripe # set up the file containing input expts and refls (logging) # expts then refls # set up the params for dials.combine_experiments # refinement of the grouped experiments # refinement of the grouped experiments # reintegration # extract results to integration pickles for merging # submit queued job from appropriate directory | 2.058491 | 2 |
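The script above builds each step's parameter file from libtbx.phil scopes: command-line and file overrides are merged with fetch(), and fetch_diff() later recovers only the non-default values that set_up_section writes out. A minimal, self-contained sketch of that pattern with a toy scope (not the script's real parameters; assumes libtbx is importable):

from libtbx.phil import parse

master = parse("""
group {
  value = 1
    .type = int
}
""")
override = parse("group.value = 2")           # stand-in for a command-line override
working = master.fetch(override)              # merge the override into the master scope
print(working.extract().group.value)          # -> 2
print(master.fetch_diff(working).as_str())    # only what differs from the defaults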
homeassistant/components/sensor/nederlandse_spoorwegen.py | shanbs/home-assistant | 2 | 6630108 | <filename>homeassistant/components/sensor/nederlandse_spoorwegen.py<gh_stars>1-10
"""
Support for Nederlandse Spoorwegen public transport.
For more details on this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.nederlandse_spoorwegen/
"""
from datetime import datetime, timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_EMAIL, CONF_NAME, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['nsapi==2.7.4']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by NS"
CONF_ROUTES = 'routes'
CONF_FROM = 'from'
CONF_TO = 'to'
CONF_VIA = 'via'
ICON = 'mdi:train'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
ROUTE_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_FROM): cv.string,
vol.Required(CONF_TO): cv.string,
vol.Optional(CONF_VIA): cv.string})
ROUTES_SCHEMA = vol.All(
cv.ensure_list,
[ROUTE_SCHEMA])
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_ROUTES): ROUTES_SCHEMA,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the departure sensor."""
import ns_api
nsapi = ns_api.NSAPI(
config.get(CONF_EMAIL), config.get(CONF_PASSWORD))
try:
stations = nsapi.get_stations()
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as error:
_LOGGER.error("Couldn't fetch stations, API password correct?: %s",
error)
return
sensors = []
for departure in config.get(CONF_ROUTES):
if(not valid_stations(stations, [departure.get(CONF_FROM),
departure.get(CONF_VIA),
departure.get(CONF_TO)])):
continue
sensors.append(
NSDepartureSensor(
nsapi, departure.get(CONF_NAME), departure.get(CONF_FROM),
departure.get(CONF_TO), departure.get(CONF_VIA)))
if sensors:
add_entities(sensors, True)
def valid_stations(stations, given_stations):
"""Verify the existence of the given station codes."""
for station in given_stations:
if station is None:
continue
if not any(s.code == station.upper() for s in stations):
_LOGGER.warning("Station '%s' is not a valid station.", station)
return False
return True
class NSDepartureSensor(Entity):
"""Implementation of a NS Departure Sensor."""
def __init__(self, nsapi, name, departure, heading, via):
"""Initialize the sensor."""
self._nsapi = nsapi
self._name = name
self._departure = departure
self._via = via
self._heading = heading
self._state = None
self._trips = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def state(self):
"""Return the next departure time."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if not self._trips:
return
if self._trips[0].trip_parts:
route = [self._trips[0].departure]
for k in self._trips[0].trip_parts:
route.append(k.destination)
return {
'going': self._trips[0].going,
'departure_time_planned':
self._trips[0].departure_time_planned.strftime('%H:%M'),
'departure_time_actual':
self._trips[0].departure_time_actual.strftime('%H:%M'),
'departure_delay':
self._trips[0].departure_time_planned !=
self._trips[0].departure_time_actual,
'departure_platform':
self._trips[0].trip_parts[0].stops[0].platform,
'departure_platform_changed':
self._trips[0].trip_parts[0].stops[0].platform_changed,
'arrival_time_planned':
self._trips[0].arrival_time_planned.strftime('%H:%M'),
'arrival_time_actual':
self._trips[0].arrival_time_actual.strftime('%H:%M'),
'arrival_delay':
self._trips[0].arrival_time_planned !=
self._trips[0].arrival_time_actual,
'arrival_platform':
self._trips[0].trip_parts[0].stops[-1].platform,
'arrival_platform_changed':
self._trips[0].trip_parts[0].stops[-1].platform_changed,
'next':
self._trips[1].departure_time_actual.strftime('%H:%M'),
'status': self._trips[0].status.lower(),
'transfers': self._trips[0].nr_transfers,
'route': route,
'remarks': [r.message for r in self._trips[0].trip_remarks],
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the trip information."""
try:
self._trips = self._nsapi.get_trips(
datetime.now().strftime("%d-%m-%Y %H:%M"),
self._departure, self._via, self._heading,
True, 0)
if self._trips:
actual_time = self._trips[0].departure_time_actual
self._state = actual_time.strftime('%H:%M')
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as error:
_LOGGER.error("Couldn't fetch trip info: %s", error)
| <filename>homeassistant/components/sensor/nederlandse_spoorwegen.py<gh_stars>1-10
"""
Support for Nederlandse Spoorwegen public transport.
For more details on this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.nederlandse_spoorwegen/
"""
from datetime import datetime, timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_EMAIL, CONF_NAME, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['nsapi==2.7.4']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by NS"
CONF_ROUTES = 'routes'
CONF_FROM = 'from'
CONF_TO = 'to'
CONF_VIA = 'via'
ICON = 'mdi:train'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
ROUTE_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_FROM): cv.string,
vol.Required(CONF_TO): cv.string,
vol.Optional(CONF_VIA): cv.string})
ROUTES_SCHEMA = vol.All(
cv.ensure_list,
[ROUTE_SCHEMA])
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_ROUTES): ROUTES_SCHEMA,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the departure sensor."""
import ns_api
nsapi = ns_api.NSAPI(
config.get(CONF_EMAIL), config.get(CONF_PASSWORD))
try:
stations = nsapi.get_stations()
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as error:
_LOGGER.error("Couldn't fetch stations, API password correct?: %s",
error)
return
sensors = []
for departure in config.get(CONF_ROUTES):
if(not valid_stations(stations, [departure.get(CONF_FROM),
departure.get(CONF_VIA),
departure.get(CONF_TO)])):
continue
sensors.append(
NSDepartureSensor(
nsapi, departure.get(CONF_NAME), departure.get(CONF_FROM),
departure.get(CONF_TO), departure.get(CONF_VIA)))
if sensors:
add_entities(sensors, True)
def valid_stations(stations, given_stations):
"""Verify the existence of the given station codes."""
for station in given_stations:
if station is None:
continue
if not any(s.code == station.upper() for s in stations):
_LOGGER.warning("Station '%s' is not a valid station.", station)
return False
return True
class NSDepartureSensor(Entity):
"""Implementation of a NS Departure Sensor."""
def __init__(self, nsapi, name, departure, heading, via):
"""Initialize the sensor."""
self._nsapi = nsapi
self._name = name
self._departure = departure
self._via = via
self._heading = heading
self._state = None
self._trips = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def state(self):
"""Return the next departure time."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if not self._trips:
return
if self._trips[0].trip_parts:
route = [self._trips[0].departure]
for k in self._trips[0].trip_parts:
route.append(k.destination)
return {
'going': self._trips[0].going,
'departure_time_planned':
self._trips[0].departure_time_planned.strftime('%H:%M'),
'departure_time_actual':
self._trips[0].departure_time_actual.strftime('%H:%M'),
'departure_delay':
self._trips[0].departure_time_planned !=
self._trips[0].departure_time_actual,
'departure_platform':
self._trips[0].trip_parts[0].stops[0].platform,
'departure_platform_changed':
self._trips[0].trip_parts[0].stops[0].platform_changed,
'arrival_time_planned':
self._trips[0].arrival_time_planned.strftime('%H:%M'),
'arrival_time_actual':
self._trips[0].arrival_time_actual.strftime('%H:%M'),
'arrival_delay':
self._trips[0].arrival_time_planned !=
self._trips[0].arrival_time_actual,
'arrival_platform':
self._trips[0].trip_parts[0].stops[-1].platform,
'arrival_platform_changed':
self._trips[0].trip_parts[0].stops[-1].platform_changed,
'next':
self._trips[1].departure_time_actual.strftime('%H:%M'),
'status': self._trips[0].status.lower(),
'transfers': self._trips[0].nr_transfers,
'route': route,
'remarks': [r.message for r in self._trips[0].trip_remarks],
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the trip information."""
try:
self._trips = self._nsapi.get_trips(
datetime.now().strftime("%d-%m-%Y %H:%M"),
self._departure, self._via, self._heading,
True, 0)
if self._trips:
actual_time = self._trips[0].departure_time_actual
self._state = actual_time.strftime('%H:%M')
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as error:
_LOGGER.error("Couldn't fetch trip info: %s", error)
| en | 0.646852 | Support for Nederlandse Spoorwegen public transport. For more details on this platform, please refer to the documentation at https://home-assistant.io/components/sensor.nederlandse_spoorwegen/ Set up the departure sensor. Verify the existence of the given station codes. Implementation of a NS Departure Sensor. Initialize the sensor. Return the name of the sensor. Return the icon for the frontend. Return the next departure time. Return the state attributes. Get the trip information. | 2.231611 | 2 |
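The ROUTE_SCHEMA/PLATFORM_SCHEMA definitions above are voluptuous schemas; validation happens by calling the schema on a dict. A standalone illustration of the same idea outside Home Assistant (keys written as plain strings, values chosen only for the example):

import voluptuous as vol

route_schema = vol.Schema({
    vol.Required('name'): str,
    vol.Required('from'): str,
    vol.Required('to'): str,
    vol.Optional('via'): str,
})

print(route_schema({'name': 'commute', 'from': 'ASD', 'to': 'UT'}))  # returns the validated dict
try:
    route_schema({'name': 'commute'})          # 'from' and 'to' are missing
except vol.MultipleInvalid as err:
    print(err)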
src/onegov/swissvotes/app.py | politbuero-kampagnen/onegov-cloud | 0 | 6630109 | <filename>src/onegov/swissvotes/app.py
from cached_property import cached_property
from more.content_security import SELF
from onegov.core import Framework
from onegov.core import utils
from onegov.core.framework import default_content_security_policy
from onegov.file import DepotApp
from onegov.form import FormApp
from onegov.quill import QuillApp
from onegov.swissvotes.models import Principal
from onegov.swissvotes.theme import SwissvotesTheme
from onegov.user import UserApp
class SwissvotesApp(Framework, FormApp, QuillApp, DepotApp, UserApp):
""" The swissvotes application. Include this in your onegov.yml to serve
it with onegov-server.
"""
serve_static_files = True
@cached_property
def principal(self):
return Principal()
@cached_property
def static_content_pages(self):
return {'home', 'disclaimer', 'imprint', 'data-protection'}
def get_cached_dataset(self, format):
""" Gets or creates the dataset in the requested format.
We store the dataset using the last modified timestamp - this way, we
have a version of past datasets. Note that we don't delete any old
datasets.
"""
from onegov.swissvotes.collections import SwissVoteCollection
assert format in ('csv', 'xlsx')
votes = SwissVoteCollection(self)
last_modified = votes.last_modified
last_modified = last_modified.timestamp() if last_modified else ''
filename = f'dataset-{last_modified}.{format}'
mode = 'b' if format == 'xlsx' else ''
if not self.filestorage.exists(filename):
with self.filestorage.open(filename, f'w{mode}') as file:
getattr(votes, f'export_{format}')(file)
with self.filestorage.open(filename, f'r{mode}') as file:
result = file.read()
return result
def configure_mfg_api_token(self, **cfg):
""" Configures the Museum für Gestaltung API Token. """
self.mfg_api_token = cfg.get('mfg_api_token', None)
@SwissvotesApp.static_directory()
def get_static_directory():
return 'static'
@SwissvotesApp.template_directory()
def get_template_directory():
return 'templates'
@SwissvotesApp.setting(section='core', name='theme')
def get_theme():
return SwissvotesTheme()
@SwissvotesApp.setting(section='i18n', name='localedirs')
def get_i18n_localedirs():
return [
utils.module_path('onegov.swissvotes', 'locale'),
utils.module_path('onegov.form', 'locale'),
utils.module_path('onegov.user', 'locale')
]
@SwissvotesApp.setting(section='i18n', name='locales')
def get_i18n_used_locales():
return {'de_CH', 'fr_CH', 'en_US'}
@SwissvotesApp.setting(section='i18n', name='default_locale')
def get_i18n_default_locale():
return 'de_CH'
@SwissvotesApp.setting(section='content_security_policy', name='default')
def org_content_security_policy():
policy = default_content_security_policy()
policy.connect_src.add(SELF)
policy.connect_src.add('https://sentry.io')
policy.connect_src.add('https://stats.seantis.ch')
policy.img_src.add('https://www.emuseum.ch')
return policy
@SwissvotesApp.webasset_path()
def get_shared_assets_path():
return utils.module_path('onegov.shared', 'assets/js')
@SwissvotesApp.webasset_path()
def get_js_path():
return 'assets/js'
@SwissvotesApp.webasset_path()
def get_css_path():
return 'assets/css'
@SwissvotesApp.webasset_output()
def get_webasset_output():
return 'assets/bundles'
@SwissvotesApp.webasset('frameworks')
def get_frameworks_asset():
yield 'modernizr.js'
yield 'jquery.js'
yield 'jquery.tablesorter.js'
yield 'tablesaw.css'
yield 'tablesaw.jquery.js'
yield 'tablesaw-create.js'
yield 'tablesaw-init.js'
yield 'd3.js'
yield 'd3.chart.bar.js'
yield 'foundation.js'
yield 'intercooler.js'
yield 'underscore.js'
yield 'sortable.js'
yield 'sortable_custom.js'
yield 'react.js'
yield 'react-dom.js'
yield 'react-dropdown-tree-select.js'
yield 'react-dropdown-tree-select.css'
yield 'form_dependencies.js'
yield 'confirm.jsx'
yield 'jquery.datetimepicker.css'
yield 'jquery.datetimepicker.js'
yield 'datetimepicker.js'
yield 'dropzone.js'
@SwissvotesApp.webasset('common')
def get_common_asset():
yield 'common.js'
yield 'policy-selector.jsx'
yield 'image-gallery.js'
| <filename>src/onegov/swissvotes/app.py
from cached_property import cached_property
from more.content_security import SELF
from onegov.core import Framework
from onegov.core import utils
from onegov.core.framework import default_content_security_policy
from onegov.file import DepotApp
from onegov.form import FormApp
from onegov.quill import QuillApp
from onegov.swissvotes.models import Principal
from onegov.swissvotes.theme import SwissvotesTheme
from onegov.user import UserApp
class SwissvotesApp(Framework, FormApp, QuillApp, DepotApp, UserApp):
""" The swissvotes application. Include this in your onegov.yml to serve
it with onegov-server.
"""
serve_static_files = True
@cached_property
def principal(self):
return Principal()
@cached_property
def static_content_pages(self):
return {'home', 'disclaimer', 'imprint', 'data-protection'}
def get_cached_dataset(self, format):
""" Gets or creates the dataset in the requested format.
We store the dataset using the last modified timestamp - this way, we
have a version of past datasets. Note that we don't delete any old
datasets.
"""
from onegov.swissvotes.collections import SwissVoteCollection
assert format in ('csv', 'xlsx')
votes = SwissVoteCollection(self)
last_modified = votes.last_modified
last_modified = last_modified.timestamp() if last_modified else ''
filename = f'dataset-{last_modified}.{format}'
mode = 'b' if format == 'xlsx' else ''
if not self.filestorage.exists(filename):
with self.filestorage.open(filename, f'w{mode}') as file:
getattr(votes, f'export_{format}')(file)
with self.filestorage.open(filename, f'r{mode}') as file:
result = file.read()
return result
def configure_mfg_api_token(self, **cfg):
""" Configures the Museum für Gestaltung API Token. """
self.mfg_api_token = cfg.get('mfg_api_token', None)
@SwissvotesApp.static_directory()
def get_static_directory():
return 'static'
@SwissvotesApp.template_directory()
def get_template_directory():
return 'templates'
@SwissvotesApp.setting(section='core', name='theme')
def get_theme():
return SwissvotesTheme()
@SwissvotesApp.setting(section='i18n', name='localedirs')
def get_i18n_localedirs():
return [
utils.module_path('onegov.swissvotes', 'locale'),
utils.module_path('onegov.form', 'locale'),
utils.module_path('onegov.user', 'locale')
]
@SwissvotesApp.setting(section='i18n', name='locales')
def get_i18n_used_locales():
return {'de_CH', 'fr_CH', 'en_US'}
@SwissvotesApp.setting(section='i18n', name='default_locale')
def get_i18n_default_locale():
return 'de_CH'
@SwissvotesApp.setting(section='content_security_policy', name='default')
def org_content_security_policy():
policy = default_content_security_policy()
policy.connect_src.add(SELF)
policy.connect_src.add('https://sentry.io')
policy.connect_src.add('https://stats.seantis.ch')
policy.img_src.add('https://www.emuseum.ch')
return policy
@SwissvotesApp.webasset_path()
def get_shared_assets_path():
return utils.module_path('onegov.shared', 'assets/js')
@SwissvotesApp.webasset_path()
def get_js_path():
return 'assets/js'
@SwissvotesApp.webasset_path()
def get_css_path():
return 'assets/css'
@SwissvotesApp.webasset_output()
def get_webasset_output():
return 'assets/bundles'
@SwissvotesApp.webasset('frameworks')
def get_frameworks_asset():
yield 'modernizr.js'
yield 'jquery.js'
yield 'jquery.tablesorter.js'
yield 'tablesaw.css'
yield 'tablesaw.jquery.js'
yield 'tablesaw-create.js'
yield 'tablesaw-init.js'
yield 'd3.js'
yield 'd3.chart.bar.js'
yield 'foundation.js'
yield 'intercooler.js'
yield 'underscore.js'
yield 'sortable.js'
yield 'sortable_custom.js'
yield 'react.js'
yield 'react-dom.js'
yield 'react-dropdown-tree-select.js'
yield 'react-dropdown-tree-select.css'
yield 'form_dependencies.js'
yield 'confirm.jsx'
yield 'jquery.datetimepicker.css'
yield 'jquery.datetimepicker.js'
yield 'datetimepicker.js'
yield 'dropzone.js'
@SwissvotesApp.webasset('common')
def get_common_asset():
yield 'common.js'
yield 'policy-selector.jsx'
yield 'image-gallery.js'
| en | 0.815867 | The swissvotes application. Include this in your onegov.yml to serve it with onegov-server. Gets or creates the dataset in the requested format. We store the dataset using the last modified timestamp - this way, we have a version of past datasets. Note that we don't delete any old datasets. Configures the Museum für Gestaltung API Token. | 2.24728 | 2 |
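get_cached_dataset above names the cached file after the collection's last-modified timestamp, so a new export is produced only when the data changes and older exports are simply left in place. The same idea reduced to a plain-filesystem sketch (hypothetical helper, not part of onegov):

import os

def cached_export(cache_dir, last_modified, fmt, exporter):
    # exporter is any callable that writes the dataset into an open file object
    stamp = last_modified.timestamp() if last_modified else ''
    path = os.path.join(cache_dir, f'dataset-{stamp}.{fmt}')
    mode = 'b' if fmt == 'xlsx' else ''
    if not os.path.exists(path):
        with open(path, f'w{mode}') as f:
            exporter(f)
    with open(path, f'r{mode}') as f:
        return f.read()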
tests/conftest.py | clbarnes/ncollpyde | 4 | 6630110 | from pathlib import Path
import meshio
import pytest
from ncollpyde import Volume
test_dir = Path(__file__).resolve().parent
project_dir = test_dir.parent
mesh_dir = project_dir / "meshes"
@pytest.fixture
def mesh():
return meshio.read(str(mesh_dir / "teapot.stl"))
@pytest.fixture
def volume(mesh):
    return Volume.from_meshio(mesh, validate=True)
@pytest.fixture
def simple_mesh():
return meshio.read(str(mesh_dir / "cube.stl"))
@pytest.fixture
def simple_volume(simple_mesh):
return Volume.from_meshio(simple_mesh, validate=True)
@pytest.fixture
def sez_right():
return Volume.from_meshio(
meshio.read(str(mesh_dir / "SEZ_right.stl")), validate=True
)
| from pathlib import Path
import meshio
import pytest
from ncollpyde import Volume
test_dir = Path(__file__).resolve().parent
project_dir = test_dir.parent
mesh_dir = project_dir / "meshes"
@pytest.fixture
def mesh():
return meshio.read(str(mesh_dir / "teapot.stl"))
@pytest.fixture
def volume(mesh):
    return Volume.from_meshio(mesh, validate=True)
@pytest.fixture
def simple_mesh():
return meshio.read(str(mesh_dir / "cube.stl"))
@pytest.fixture
def simple_volume(simple_mesh):
return Volume.from_meshio(simple_mesh, validate=True)
@pytest.fixture
def sez_right():
return Volume.from_meshio(
meshio.read(str(mesh_dir / "SEZ_right.stl")), validate=True
)
| none | 1 | 2.156714 | 2 |
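The fixtures above are only defined, never exercised, in this file; a minimal sketch of a companion test module that consumes them (module name and assertions are illustrative only):

# tests/test_volume_fixtures.py -- hypothetical companion module
from ncollpyde import Volume


def test_simple_volume_builds(simple_volume):
    # built by the fixture from meshes/cube.stl with validate=True
    assert isinstance(simple_volume, Volume)


def test_teapot_volume_builds(volume):
    assert isinstance(volume, Volume)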
|
throughput.py | mgpadalkar/pidinet | 137 | 6630111 | """
(Testing FPS)
Pixel Difference Networks for Efficient Edge Detection (accepted as an ICCV 2021 oral)
See paper in https://arxiv.org/abs/2108.07009
Author: <NAME>, <NAME>
Date: Aug 22, 2020
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import argparse
import os
import time
import models
from utils import *
from edge_dataloader import BSDS_VOCLoader, BSDS_Loader, Multicue_Loader, NYUD_Loader
from torch.utils.data import DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Diff Convolutional Networks (Test FPS)')
parser.add_argument('--datadir', type=str, default='../data',
help='dir to the dataset')
parser.add_argument('--dataset', type=str, default='BSDS',
help='data settings for BSDS, Multicue and NYUD datasets')
parser.add_argument('--model', type=str, default='baseline',
help='model to train the dataset')
parser.add_argument('--sa', action='store_true',
help='use attention in diffnet')
parser.add_argument('--dil', action='store_true',
help='use dilation in diffnet')
parser.add_argument('--config', type=str, default='nas-all',
help='model configurations, please refer to models/config.py for possible configurations')
parser.add_argument('--seed', type=int, default=None,
help='random seed (default: None)')
parser.add_argument('--gpu', type=str, default='',
help='gpus available')
parser.add_argument('--epochs', type=int, default=150,
help='number of total epochs to run')
parser.add_argument('-j', '--workers', type=int, default=4,
help='number of data loading workers')
parser.add_argument('--eta', type=float, default=0.3,
help='threshold to determine the ground truth')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
def main():
global args
### Refine args
if args.seed is None:
args.seed = int(time.time())
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
args.use_cuda = torch.cuda.is_available()
dataset_setting_choices = ['BSDS', 'NYUD-image', 'NYUD-hha', 'Multicue-boundary-1',
'Multicue-boundary-2', 'Multicue-boundary-3', 'Multicue-edge-1', 'Multicue-edge-2', 'Multicue-edge-3']
if not isinstance(args.dataset, list):
assert args.dataset in dataset_setting_choices, 'unrecognized data setting %s, please choose from %s' % (str(args.dataset), str(dataset_setting_choices))
args.dataset = list(args.dataset.strip().split('-'))
print(args)
### Create model
model = getattr(models, args.model)(args)
### Transfer to cuda devices
if args.use_cuda:
model = torch.nn.DataParallel(model).cuda()
print('cuda is used, with %d gpu devices' % torch.cuda.device_count())
else:
print('cuda is not used, the running might be slow')
### Load Data
if 'BSDS' == args.dataset[0]:
test_dataset = BSDS_VOCLoader(root=args.datadir, split="test", threshold=args.eta)
elif 'Multicue' == args.dataset[0]:
test_dataset = Multicue_Loader(root=args.datadir, split="test", threshold=args.eta, setting=args.dataset[1:])
elif 'NYUD' == args.dataset[0]:
test_dataset = NYUD_Loader(root=args.datadir, split="test", setting=args.dataset[1:])
else:
raise ValueError("unrecognized dataset setting")
test_loader = DataLoader(
test_dataset, batch_size=1, num_workers=args.workers, shuffle=False)
test(test_loader, model, args)
return
def test(test_loader, model, args):
model.eval()
end = time.perf_counter()
torch.cuda.synchronize()
for idx, (image, img_name) in enumerate(test_loader):
with torch.no_grad():
image = image.cuda() if args.use_cuda else image
_, _, H, W = image.shape
results = model(image)
torch.cuda.synchronize()
end = time.perf_counter() - end
print('fps: %f' % (len(test_loader) / end))
if __name__ == '__main__':
main()
print('done')
| """
(Testing FPS)
Pixel Difference Networks for Efficient Edge Detection (accepted as an ICCV 2021 oral)
See paper in https://arxiv.org/abs/2108.07009
Author: <NAME>, <NAME>
Date: Aug 22, 2020
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import argparse
import os
import time
import models
from utils import *
from edge_dataloader import BSDS_VOCLoader, BSDS_Loader, Multicue_Loader, NYUD_Loader
from torch.utils.data import DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Diff Convolutional Networks (Test FPS)')
parser.add_argument('--datadir', type=str, default='../data',
help='dir to the dataset')
parser.add_argument('--dataset', type=str, default='BSDS',
help='data settings for BSDS, Multicue and NYUD datasets')
parser.add_argument('--model', type=str, default='baseline',
help='model to train the dataset')
parser.add_argument('--sa', action='store_true',
help='use attention in diffnet')
parser.add_argument('--dil', action='store_true',
help='use dilation in diffnet')
parser.add_argument('--config', type=str, default='nas-all',
help='model configurations, please refer to models/config.py for possible configurations')
parser.add_argument('--seed', type=int, default=None,
help='random seed (default: None)')
parser.add_argument('--gpu', type=str, default='',
help='gpus available')
parser.add_argument('--epochs', type=int, default=150,
help='number of total epochs to run')
parser.add_argument('-j', '--workers', type=int, default=4,
help='number of data loading workers')
parser.add_argument('--eta', type=float, default=0.3,
help='threshold to determine the ground truth')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
def main():
global args
### Refine args
if args.seed is None:
args.seed = int(time.time())
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
args.use_cuda = torch.cuda.is_available()
dataset_setting_choices = ['BSDS', 'NYUD-image', 'NYUD-hha', 'Multicue-boundary-1',
'Multicue-boundary-2', 'Multicue-boundary-3', 'Multicue-edge-1', 'Multicue-edge-2', 'Multicue-edge-3']
if not isinstance(args.dataset, list):
assert args.dataset in dataset_setting_choices, 'unrecognized data setting %s, please choose from %s' % (str(args.dataset), str(dataset_setting_choices))
args.dataset = list(args.dataset.strip().split('-'))
print(args)
### Create model
model = getattr(models, args.model)(args)
### Transfer to cuda devices
if args.use_cuda:
model = torch.nn.DataParallel(model).cuda()
print('cuda is used, with %d gpu devices' % torch.cuda.device_count())
else:
print('cuda is not used, the running might be slow')
### Load Data
if 'BSDS' == args.dataset[0]:
test_dataset = BSDS_VOCLoader(root=args.datadir, split="test", threshold=args.eta)
elif 'Multicue' == args.dataset[0]:
test_dataset = Multicue_Loader(root=args.datadir, split="test", threshold=args.eta, setting=args.dataset[1:])
elif 'NYUD' == args.dataset[0]:
test_dataset = NYUD_Loader(root=args.datadir, split="test", setting=args.dataset[1:])
else:
raise ValueError("unrecognized dataset setting")
test_loader = DataLoader(
test_dataset, batch_size=1, num_workers=args.workers, shuffle=False)
test(test_loader, model, args)
return
def test(test_loader, model, args):
model.eval()
end = time.perf_counter()
torch.cuda.synchronize()
for idx, (image, img_name) in enumerate(test_loader):
with torch.no_grad():
image = image.cuda() if args.use_cuda else image
_, _, H, W = image.shape
results = model(image)
torch.cuda.synchronize()
end = time.perf_counter() - end
print('fps: %f' % (len(test_loader) / end))
if __name__ == '__main__':
main()
print('done')
| en | 0.66111 | (Testing FPS) Pixel Difference Networks for Efficient Edge Detection (accepted as an ICCV 2021 oral) See paper in https://arxiv.org/abs/2108.07009 Author: <NAME>, <NAME> Date: Aug 22, 2020 ### Refine args ### Create model ### Transfer to cuda devices ### Load Data | 2.34552 | 2 |
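The timing in test() follows the usual CUDA measurement pattern: synchronize, run the whole loader under no_grad, synchronize again, then divide the number of batches by the elapsed wall time. The same measurement isolated into a small generic helper (a sketch; any model/loader pair with this interface would do):

import time
import torch

def measure_fps(model, loader, use_cuda=True):
    model.eval()
    if use_cuda:
        torch.cuda.synchronize()
    start = time.perf_counter()
    with torch.no_grad():
        for image, _ in loader:
            model(image.cuda() if use_cuda else image)
    if use_cuda:
        torch.cuda.synchronize()
    return len(loader) / (time.perf_counter() - start)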
src/bpp/tests/test_admin/test_wydawnictwo_ciagle.py | iplweb/bpp | 0 | 6630112 | <reponame>iplweb/bpp
from django.urls import reverse
from model_mommy import mommy
from pbn_api.models import Publication, SentData
from bpp.models import Uczelnia, const
from bpp.tests import normalize_html
def test_wydawnictwo_ciagle_admin_zapisz_bez_linkow(
admin_app, uczelnia, wydawnictwo_ciagle, charaktery_formalne
):
url = "admin:bpp_wydawnictwo_ciagle_change"
page = admin_app.get(reverse(url, args=(wydawnictwo_ciagle.pk,)))
page.forms["wydawnictwo_ciagle_form"][
"tytul_oryginalny"
].value = "Test www.onet.pl formularza"
page.forms["wydawnictwo_ciagle_form"].submit().maybe_follow()
wydawnictwo_ciagle.refresh_from_db()
assert "a href" not in wydawnictwo_ciagle.tytul_oryginalny
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_add_tak(
admin_app, uczelnia, mocker
):
uczelnia.pbn_aktualizuj_na_biezaco = True
uczelnia.pbn_integracja = True
uczelnia.pbn_client = mocker.Mock()
uczelnia.save()
url = "admin:bpp_wydawnictwo_ciagle_add"
page = admin_app.get(reverse(url))
assert "Zapisz i wyślij do PBN" in normalize_html(page.content.decode("utf-8"))
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_add_nie(
admin_app, uczelnia, mocker
):
uczelnia.pbn_aktualizuj_na_biezaco = False
uczelnia.pbn_integracja = True
uczelnia.pbn_client = mocker.Mock()
uczelnia.save()
url = "admin:bpp_wydawnictwo_ciagle_add"
page = admin_app.get(reverse(url))
assert "Zapisz i wyślij do PBN" not in normalize_html(page.content.decode("utf-8"))
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_change_tak(
admin_app, uczelnia, mocker, wydawnictwo_ciagle, charaktery_formalne
):
orig_pbn_client = Uczelnia.pbn_client
try:
pbn_client = mocker.Mock()
Uczelnia.pbn_client = pbn_client
uczelnia.pbn_aktualizuj_na_biezaco = True
uczelnia.pbn_integracja = True
uczelnia.save()
cf = wydawnictwo_ciagle.charakter_formalny
cf.rodzaj_pbn = const.RODZAJ_PBN_ARTYKUL
cf.save()
pub = mommy.make(Publication)
SentData.objects.create(object=wydawnictwo_ciagle, data_sent={}, pbn_uid=pub)
url = "admin:bpp_wydawnictwo_ciagle_change"
page = admin_app.get(reverse(url, args=(wydawnictwo_ciagle.pk,)))
assert "Zapisz i wyślij do PBN" in normalize_html(page.content.decode("utf-8"))
page = (
page.forms["wydawnictwo_ciagle_form"]
.submit("_continue_and_pbn")
.maybe_follow()
)
content = normalize_html(page.content.decode("utf-8"))
assert "pomyślnie zmieniony" in content
assert len(pbn_client.mock_calls) == 4
finally:
Uczelnia.pbn_client = orig_pbn_client
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_change_nie(
admin_app, uczelnia, mocker, wydawnictwo_ciagle
):
orig_pbn_client = Uczelnia.pbn_client
try:
pbn_client = mocker.Mock()
Uczelnia.pbn_client = pbn_client
uczelnia.pbn_aktualizuj_na_biezaco = False
uczelnia.pbn_integracja = True
uczelnia.save()
url = "admin:bpp_wydawnictwo_ciagle_change"
page = admin_app.get(reverse(url, args=(wydawnictwo_ciagle.pk,)))
assert "Zapisz i wyślij do PBN" not in normalize_html(
page.content.decode("utf-8")
)
finally:
Uczelnia.pbn_client = orig_pbn_client
| from django.urls import reverse
from model_mommy import mommy
from pbn_api.models import Publication, SentData
from bpp.models import Uczelnia, const
from bpp.tests import normalize_html
def test_wydawnictwo_ciagle_admin_zapisz_bez_linkow(
admin_app, uczelnia, wydawnictwo_ciagle, charaktery_formalne
):
url = "admin:bpp_wydawnictwo_ciagle_change"
page = admin_app.get(reverse(url, args=(wydawnictwo_ciagle.pk,)))
page.forms["wydawnictwo_ciagle_form"][
"tytul_oryginalny"
].value = "Test www.onet.pl formularza"
page.forms["wydawnictwo_ciagle_form"].submit().maybe_follow()
wydawnictwo_ciagle.refresh_from_db()
assert "a href" not in wydawnictwo_ciagle.tytul_oryginalny
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_add_tak(
admin_app, uczelnia, mocker
):
uczelnia.pbn_aktualizuj_na_biezaco = True
uczelnia.pbn_integracja = True
uczelnia.pbn_client = mocker.Mock()
uczelnia.save()
url = "admin:bpp_wydawnictwo_ciagle_add"
page = admin_app.get(reverse(url))
assert "Zapisz i wyślij do PBN" in normalize_html(page.content.decode("utf-8"))
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_add_nie(
admin_app, uczelnia, mocker
):
uczelnia.pbn_aktualizuj_na_biezaco = False
uczelnia.pbn_integracja = True
uczelnia.pbn_client = mocker.Mock()
uczelnia.save()
url = "admin:bpp_wydawnictwo_ciagle_add"
page = admin_app.get(reverse(url))
assert "Zapisz i wyślij do PBN" not in normalize_html(page.content.decode("utf-8"))
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_change_tak(
admin_app, uczelnia, mocker, wydawnictwo_ciagle, charaktery_formalne
):
orig_pbn_client = Uczelnia.pbn_client
try:
pbn_client = mocker.Mock()
Uczelnia.pbn_client = pbn_client
uczelnia.pbn_aktualizuj_na_biezaco = True
uczelnia.pbn_integracja = True
uczelnia.save()
cf = wydawnictwo_ciagle.charakter_formalny
cf.rodzaj_pbn = const.RODZAJ_PBN_ARTYKUL
cf.save()
pub = mommy.make(Publication)
SentData.objects.create(object=wydawnictwo_ciagle, data_sent={}, pbn_uid=pub)
url = "admin:bpp_wydawnictwo_ciagle_change"
page = admin_app.get(reverse(url, args=(wydawnictwo_ciagle.pk,)))
assert "Zapisz i wyślij do PBN" in normalize_html(page.content.decode("utf-8"))
page = (
page.forms["wydawnictwo_ciagle_form"]
.submit("_continue_and_pbn")
.maybe_follow()
)
content = normalize_html(page.content.decode("utf-8"))
assert "pomyślnie zmieniony" in content
assert len(pbn_client.mock_calls) == 4
finally:
Uczelnia.pbn_client = orig_pbn_client
def test_wydawnictwo_ciagle_admin_zapisz_i_wyslij_do_pbn_change_nie(
admin_app, uczelnia, mocker, wydawnictwo_ciagle
):
orig_pbn_client = Uczelnia.pbn_client
try:
pbn_client = mocker.Mock()
Uczelnia.pbn_client = pbn_client
uczelnia.pbn_aktualizuj_na_biezaco = False
uczelnia.pbn_integracja = True
uczelnia.save()
url = "admin:bpp_wydawnictwo_ciagle_change"
page = admin_app.get(reverse(url, args=(wydawnictwo_ciagle.pk,)))
assert "Zapisz i wyślij do PBN" not in normalize_html(
page.content.decode("utf-8")
)
finally:
Uczelnia.pbn_client = orig_pbn_client | none | 1 | 2.197674 | 2 |
|
arend/settings/__init__.py | pyprogrammerblog/Arend | 0 | 6630113 | from functools import lru_cache
from arend.settings.base import Settings
@lru_cache
def get_settings():
return Settings(**map_secrets)
settings = get_settings()
| from functools import lru_cache
from arend.settings.base import Settings
@lru_cache
def get_settings():
return Settings(**map_secrets)
settings = get_settings()
| none | 1 | 1.537509 | 2 |
|
Python/Math/Mod Divmod/mod_divmod.py | brianchiang-tw/HackerRank | 2 | 6630114 | # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == '__main__':
x, y = int( input() ), int( input() )
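# divmod(x, y) returns the tuple (x // y, x % y), i.e. quotient and remainder in one call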
qr_tuple = divmod(x, y)
print( f'{qr_tuple[0]}\n{qr_tuple[1]}\n{qr_tuple}')
| # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == '__main__':
x, y = int( input() ), int( input() )
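# divmod(x, y) returns the tuple (x // y, x % y), i.e. quotient and remainder in one call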
qr_tuple = divmod(x, y)
print( f'{qr_tuple[0]}\n{qr_tuple[1]}\n{qr_tuple}')
| en | 0.824269 | # Enter your code here. Read input from STDIN. Print output to STDOUT | 3.307955 | 3 |
spider/urllib2_cookielibtest.py | fuandenghuo/100-days-of-python | 0 | 6630115 | # -*- coding: utf-8 -*-
__author__ = 'abbot'
import urllib2
import cookielib
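# Load cookies saved in Mozilla cookie.txt format and attach them to requests made through this opener.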
cookiejar = cookielib.MozillaCookieJar()
cookiejar.load('cookie.txt')
handler = urllib2.HTTPCookieProcessor(cookiejar)
opener = urllib2.build_opener(handler)
response = opener.open("http://www.baidu.com")
print response.read()
| # -*- coding: utf-8 -*-
__author__ = 'abbot'
import urllib2
import cookielib
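# Load cookies saved in Mozilla cookie.txt format and attach them to requests made through this opener.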
cookiejar = cookielib.MozillaCookieJar()
cookiejar.load('cookie.txt')
handler = urllib2.HTTPCookieProcessor(cookiejar)
opener = urllib2.build_opener(handler)
response = opener.open("http://www.baidu.com")
print response.read()
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.889963 | 3 |
setup.py | barneygale/mncrft | 0 | 6630116 | from distutils.core import setup
setup(
name='mncrft',
version='1.0',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/barneygale/mncrft',
license='MIT',
description='Minecraft data types library',
long_description=open('README.rst').read(),
install_requires=['bitstring >= 3.1.0'],
tests_require=['pytest'],
packages=[
"mncrft",
"mncrft.buffer",
"mncrft.data_pack",
"mncrft.packet",
],
package_data={'mncrft': [
'packet/data/*.csv',
'data_pack/data/*.nbt']},
)
| from distutils.core import setup
setup(
name='mncrft',
version='1.0',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/barneygale/mncrft',
license='MIT',
description='Minecraft data types library',
long_description=open('README.rst').read(),
install_requires=['bitstring >= 3.1.0'],
tests_require=['pytest'],
packages=[
"mncrft",
"mncrft.buffer",
"mncrft.data_pack",
"mncrft.packet",
],
package_data={'mncrft': [
'packet/data/*.csv',
'data_pack/data/*.nbt']},
)
| none | 1 | 1.191564 | 1 |
|
src/CompilerException.py | demin-dmitriy/almost-haskell | 1 | 6630117 | class CompilerException(Exception):
pass
# Raised when a name collision occurs or a name could not be resolved
class NameError(CompilerException):
pass
| class CompilerException(Exception):
pass
# Raised when a name collision occurs or a name could not be resolved
class NameError(CompilerException):
pass
| en | 0.868351 | # Error raises when name collision occures or name could not be resolved | 1.478233 | 1 |
scripts/sources/s_ens_two_scenarios.py | dpopadic/arpmRes | 6 | 6630118 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_ens_two_scenarios [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_ens_two_scenarios&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerENScont).
# +
import numpy as np
import matplotlib.pyplot as plt
from arpym.estimation import effective_num_scenarios
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-parameters)
k_ = 100 # size of grid of probabilities
min_p_1 = 0 # minimum value for p_1
max_p_1 = 1 # maximum value for p_1
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step01): Create flexible probabilities scenarios
# create flexible probabilities
p_1 = np.linspace(min_p_1, max_p_1, num=k_)
p_2 = np.ones(k_)-p_1
p = np.vstack((p_1, p_2))
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step02): Calculate the effective number of scenarios
ens = np.zeros(k_)
for k in range(k_):
ens[k] = effective_num_scenarios(p[:, k])
# ## Plots
# +
plt.style.use('arpm')
f = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
plt.plot(p_1, ens, lw=1.5)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$p_1$', fontsize=17)
plt.ylabel('$ens(\mathbf{p})$', fontsize=17)
plt.title('Effective number of scenarios as the flexible probabilities vary\n'
r'$\bar{t}=2$', fontsize=20, fontweight='bold')
add_logo(f, location=1, set_fig_size=False)
plt.tight_layout()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_ens_two_scenarios [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_ens_two_scenarios&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerENScont).
# +
import numpy as np
import matplotlib.pyplot as plt
from arpym.estimation import effective_num_scenarios
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-parameters)
k_ = 100 # size of grid of probabilities
min_p_1 = 0 # minimum value for p_1
max_p_1 = 1 # maximum value for p_1
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step01): Create flexible probabilities scenarios
# create flexible probabilities
p_1 = np.linspace(min_p_1, max_p_1, num=k_)
p_2 = np.ones(k_)-p_1
p = np.vstack((p_1, p_2))
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step02): Calculate the effective number of scenarios
ens = np.zeros(k_)
for k in range(k_):
ens[k] = effective_num_scenarios(p[:, k])
# ## Plots
# +
plt.style.use('arpm')
f = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
plt.plot(p_1, ens, lw=1.5)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$p_1$', fontsize=17)
plt.ylabel('$ens(\mathbf{p})$', fontsize=17)
plt.title('Effective number of scenarios as the flexible probabilities vary\n'
r'$\bar{t}=2$', fontsize=20, fontweight='bold')
add_logo(f, location=1, set_fig_size=False)
plt.tight_layout() | en | 0.431807 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # s_ens_two_scenarios [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_ens_two_scenarios&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerENScont). # + # - # ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-parameters) # size of grid of probabilities # minimum value for p_1 # maximum value for p_1 # ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step01): Create flexible probabilities scenarios # create flexible probabilities # ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step02): Calculate the effective number of scenarios # ## Plots # + | 2.096927 | 2 |
tests/integration/test_uow.py | jeantardelli/architecture-patterns-with-python | 1 | 6630119 | <gh_stars>1-10
#pylint: disable=broad-except
import threading
import time
import traceback
import pytest
from typing import List
from allocation.domain import model
from allocation.service_layer import unit_of_work
from ..random_refs import random_sku, random_batchref, random_orderid
pytestmark = pytest.mark.usefixtures("mappers")
def insert_batch(session, ref, sku, qty, eta, product_version=1):
session.execute(
"INSERT INTO products (sku, version_number) VALUES (:sku, :version)",
dict(sku=sku, version=product_version))
session.execute(
"INSERT INTO batches (reference, sku, _purchased_quantity, eta)"
" VALUES (:ref, :sku, :qty, :eta)",
dict(ref=ref, sku=sku, qty=qty, eta=eta))
def get_allocated_batch_ref(session, orderid, sku):
[[orderlineid]] = session.execute(
"SELECT id FROM order_lines WHERE orderid=:orderid AND sku=:sku",
dict(orderid=orderid, sku=sku))
[[batchref]] = session.execute(
"SELECT b.reference FROM allocations AS a JOIN batches AS b ON a.batch_id = b.id"
" WHERE orderline_id=:orderlineid",
dict(orderlineid=orderlineid))
return batchref
def test_uow_can_retrieve_a_batch_and_allocate_to_it(sqlite_session_factory):
session = sqlite_session_factory()
insert_batch(session, "batch01", "HIPSTER-WORKBENCH", 100, None)
session.commit()
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
product = uow.products.get(sku="HIPSTER-WORKBENCH")
line = model.OrderLine("o1", "HIPSTER-WORKBENCH", 10)
product.allocate(line)
time.sleep(0.2)
uow.commit()
batchref = get_allocated_batch_ref(session, "o1", "HIPSTER-WORKBENCH")
assert batchref == "batch01"
def test_rolls_back_uncommitted_work_by_default(sqlite_session_factory):
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
insert_batch(uow.session, "batch01", "MEDIUM-PLINTH", 100, None)
new_session = sqlite_session_factory()
rows = list(new_session.execute("SELECT * FROM 'batches'"))
assert rows == []
def test_rolls_back_on_error(sqlite_session_factory):
class MyException(Exception):
pass
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with pytest.raises(MyException):
with uow:
insert_batch(uow.session, "batch01", "LARGE-FORK", 100, None)
raise MyException()
new_session = sqlite_session_factory()
rows = list(new_session.execute("SELECT * FROM 'batches'"))
assert rows == []
def try_to_allocate(orderid, sku, exceptions):
line = model.OrderLine(orderid, sku, 10)
uow = unit_of_work.SqlAlchemyUnitOfWork()
try:
with uow:
product = uow.products.get(sku=sku)
product.allocate(line)
time.sleep(0.5)
uow.commit()
except Exception as e:
print(traceback.format_exc())
exceptions.append(e)
def test_concurrent_updates_to_version_are_not_allowed(mysql_session_factory):
sku, batch = random_sku(), random_batchref()
session = mysql_session_factory()
insert_batch(session, batch, sku, 100, eta=None, product_version=1)
session.commit()
order1, order2 = random_orderid(1), random_orderid(2)
exceptions = [] # type: List[Exception]
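# Run both allocations concurrently; the version-number check should let only one of them commit.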
try_to_allocate_order1 = lambda : try_to_allocate(order1, sku, exceptions)
try_to_allocate_order2 = lambda : try_to_allocate(order2, sku, exceptions)
thread1 = threading.Thread(target=try_to_allocate_order1)
thread2 = threading.Thread(target=try_to_allocate_order2)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
[[version]] = session.execute(
"SELECT version_number FROM products WHERE sku=:sku", dict(sku=sku))
assert version == 2
orders = session.execute(
"SELECT orderid FROM allocations"
" JOIN batches ON allocations.batch_id = batches.id"
" JOIN order_lines ON allocations.orderline_id = order_lines.id"
" WHERE order_lines.sku=:sku",
dict(sku=sku))
[exception] = exceptions
assert "Deadlock found when trying to get lock" in str(exception)
assert orders.rowcount == 1
with unit_of_work.SqlAlchemyUnitOfWork() as uow:
uow.session.execute("SELECT 1")
| #pylint: disable=broad-except
import threading
import time
import traceback
import pytest
from typing import List
from allocation.domain import model
from allocation.service_layer import unit_of_work
from ..random_refs import random_sku, random_batchref, random_orderid
pytestmark = pytest.mark.usefixtures("mappers")
def insert_batch(session, ref, sku, qty, eta, product_version=1):
session.execute(
"INSERT INTO products (sku, version_number) VALUES (:sku, :version)",
dict(sku=sku, version=product_version))
session.execute(
"INSERT INTO batches (reference, sku, _purchased_quantity, eta)"
" VALUES (:ref, :sku, :qty, :eta)",
dict(ref=ref, sku=sku, qty=qty, eta=eta))
def get_allocated_batch_ref(session, orderid, sku):
[[orderlineid]] = session.execute(
"SELECT id FROM order_lines WHERE orderid=:orderid AND sku=:sku",
dict(orderid=orderid, sku=sku))
[[batchref]] = session.execute(
"SELECT b.reference FROM allocations AS a JOIN batches AS b ON a.batch_id = b.id"
" WHERE orderline_id=:orderlineid",
dict(orderlineid=orderlineid))
return batchref
def test_uow_can_retrieve_a_batch_and_allocate_to_it(sqlite_session_factory):
session = sqlite_session_factory()
insert_batch(session, "batch01", "HIPSTER-WORKBENCH", 100, None)
session.commit()
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
product = uow.products.get(sku="HIPSTER-WORKBENCH")
line = model.OrderLine("o1", "HIPSTER-WORKBENCH", 10)
product.allocate(line)
time.sleep(0.2)
uow.commit()
batchref = get_allocated_batch_ref(session, "o1", "HIPSTER-WORKBENCH")
assert batchref == "batch01"
def test_rolls_back_uncommitted_work_by_default(sqlite_session_factory):
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
insert_batch(uow.session, "batch01", "MEDIUM-PLINTH", 100, None)
new_session = sqlite_session_factory()
rows = list(new_session.execute("SELECT * FROM 'batches'"))
assert rows == []
def test_rolls_back_on_error(sqlite_session_factory):
class MyException(Exception):
pass
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with pytest.raises(MyException):
with uow:
insert_batch(uow.session, "batch01", "LARGE-FORK", 100, None)
raise MyException()
new_session = sqlite_session_factory()
rows = list(new_session.execute("SELECT * FROM 'batches'"))
assert rows == []
def try_to_allocate(orderid, sku, exceptions):
line = model.OrderLine(orderid, sku, 10)
uow = unit_of_work.SqlAlchemyUnitOfWork()
try:
with uow:
product = uow.products.get(sku=sku)
product.allocate(line)
time.sleep(0.5)
uow.commit()
except Exception as e:
print(traceback.format_exc())
exceptions.append(e)
def test_concurrent_updates_to_version_are_not_allowed(mysql_session_factory):
sku, batch = random_sku(), random_batchref()
session = mysql_session_factory()
insert_batch(session, batch, sku, 100, eta=None, product_version=1)
session.commit()
order1, order2 = random_orderid(1), random_orderid(2)
exceptions = [] # type: List[Exception]
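# Run both allocations concurrently; the version-number check should let only one of them commit.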
try_to_allocate_order1 = lambda : try_to_allocate(order1, sku, exceptions)
try_to_allocate_order2 = lambda : try_to_allocate(order2, sku, exceptions)
thread1 = threading.Thread(target=try_to_allocate_order1)
thread2 = threading.Thread(target=try_to_allocate_order2)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
[[version]] = session.execute(
"SELECT version_number FROM products WHERE sku=:sku", dict(sku=sku))
assert version == 2
orders = session.execute(
"SELECT orderid FROM allocations"
" JOIN batches ON allocations.batch_id = batches.id"
" JOIN order_lines ON allocations.orderline_id = order_lines.id"
" WHERE order_lines.sku=:sku",
dict(sku=sku))
[exception] = exceptions
assert "Deadlock found when trying to get lock" in str(exception)
assert orders.rowcount == 1
with unit_of_work.SqlAlchemyUnitOfWork() as uow:
uow.session.execute("SELECT 1") | en | 0.545272 | #pylint: disable=broad-expect # type: List[Exception] | 2.036004 | 2 |
tests/test_ion_hash_tests.py | cheqianh/ion-hash-python | 1 | 6630120 | <reponame>cheqianh/ion-hash-python<gh_stars>1-10
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from six import StringIO
from io import BytesIO
from os.path import abspath, join
import amazon.ion.simpleion as ion
import amazon.ion.reader as ion_reader
from amazon.ion.reader_managed import managed_reader
from amazon.ion.reader_text import text_reader
from amazon.ion.reader_binary import binary_reader
from amazon.ion.reader import NEXT_EVENT
from amazon.ion.reader import SKIP_EVENT
from amazon.ion.writer import blocking_writer
from amazon.ion.writer_text import raw_writer
from amazon.ion.core import IonEventType
from amazon.ion.core import IonEvent
from ionhash.hasher import hash_reader
from ionhash.hasher import hash_writer
from ionhash.hasher import HashEvent
from .util import hash_function_provider
from .util import sexp_to_bytearray
def _test_data(algorithm):
path = abspath(join(abspath(__file__), '..', '..', 'ion-hash-test', 'ion_hash_tests.ion'))
f = open(path)
ion_tests = ion.loads(f.read(), single_value=False)
f.close()
def _has_algorithm(ion_test):
return algorithm in ion_test['expect']
return filter(_has_algorithm, ion_tests)
_IVM = "$ion_1_0 "
_IVM_BYTES = [0xE0, 0x01, 0x00, 0xEA]
def _test_name(ion_test):
if len(ion_test.ion_annotations) > 0:
test_name = ion_test.ion_annotations[0].text
else:
test_name = str(ion.dumps(ion_test['ion'], binary=False))
if test_name.startswith(_IVM):
test_name = test_name[len(_IVM):]
return " " + test_name
def _to_buffer(ion_test, binary):
if 'ion' in ion_test:
v = ion.dumps(ion_test['ion'], binary=binary)
if '10n' in ion_test:
v = bytearray(_IVM_BYTES)
for byte in ion_test['10n']:
v.append(byte)
if not binary:
value = ion.load(BytesIO(v))
v = ion.dumps(value, binary=False)
if binary:
return BytesIO(v)
else:
return StringIO(v)
def _consumer_provider(reader_provider, buf):
def _f(algorithm):
buf.seek(0)
reader = hash_reader(
ion_reader.blocking_reader(managed_reader(reader_provider(), None), buf),
hash_function_provider(algorithm, _actual_updates, _actual_digests))
_consume(reader)
return reader.send(HashEvent.DIGEST)
return _f
def _consume(reader, writer=None):
event = reader.send(NEXT_EVENT)
if writer is not None:
writer.send(event)
while event.event_type is not IonEventType.STREAM_END:
event = reader.send(NEXT_EVENT)
if writer is not None:
writer.send(event)
def _writer_provider(reader_provider, buf):
def _f(algorithm):
buf.seek(0)
reader = ion_reader.blocking_reader(managed_reader(reader_provider(), None), buf)
writer = hash_writer(
blocking_writer(raw_writer(), BytesIO()),
hash_function_provider(algorithm, _actual_updates, _actual_digests))
_consume(reader, writer)
digest = writer.send(HashEvent.DIGEST)
writer.send(IonEvent(IonEventType.STREAM_END))
return digest
return _f
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_binary(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("binary"),
_to_buffer(ion_test, binary=True)))
@pytest.mark.parametrize("ion_test", _test_data("md5"), ids=_test_name)
def test_binary_md5(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("binary"),
_to_buffer(ion_test, binary=True)))
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_text(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("text"),
_to_buffer(ion_test, binary=False)))
@pytest.mark.parametrize("ion_test", _test_data("md5"), ids=_test_name)
def test_text_md5(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("text"),
_to_buffer(ion_test, binary=False)))
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_skip_over(ion_test):
buf = _to_buffer(ion_test, binary=True)
def skipping_consumer(algorithm):
buf.seek(0)
reader = hash_reader(
ion_reader.blocking_reader(managed_reader(_reader_provider("binary")(), None), buf),
hash_function_provider(algorithm, _actual_updates, _actual_digests))
event = reader.send(NEXT_EVENT)
while event.event_type != IonEventType.STREAM_END:
if event.event_type == IonEventType.CONTAINER_START:
event = reader.send(SKIP_EVENT)
else:
event = reader.send(NEXT_EVENT)
return reader.send(HashEvent.DIGEST)
_run_test(ion_test, skipping_consumer)
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_writer(ion_test):
_run_test(ion_test,
_writer_provider(_reader_provider("text"),
_to_buffer(ion_test, binary=False)))
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_simpleion(ion_test):
def to_ion_hash(algorithm):
if 'ion' in ion_test:
value = ion_test['ion']
if '10n' in ion_test:
ba = bytearray(_IVM_BYTES)
for byte in ion_test['10n']:
ba.append(byte)
value = ion.load(BytesIO(ba))
return value.ion_hash(hash_function_provider=hash_function_provider(algorithm,
_actual_updates,
_actual_digests))
_run_test(ion_test, to_ion_hash)
_actual_updates = []
_actual_digests = []
def _run_test(ion_test, digester):
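# Compare the recorded update/digest calls and the returned digest against the expectations for each algorithm.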
expect = ion_test['expect']
for algorithm in expect:
expected_updates = []
expected_digests = []
final_digest = None
for sexp in expect[algorithm]:
annot = sexp.ion_annotations[0].text
if annot == "update":
expected_updates.append(sexp_to_bytearray(sexp))
pass
elif annot == "digest":
expected_digests.append(sexp_to_bytearray(sexp))
elif annot == "final_digest":
final_digest = sexp_to_bytearray(sexp)
_actual_updates.clear()
_actual_digests.clear()
actual_digest_bytes = digester(algorithm)
if len(expected_updates) > 0:
assert _actual_updates == expected_updates
if final_digest is not None:
assert _actual_digests[-1] == final_digest
assert actual_digest_bytes == final_digest
else:
assert _actual_digests == expected_digests
assert actual_digest_bytes == expected_digests[-1]
def _reader_provider(type):
def _f():
if type == "binary":
return binary_reader()
elif type == "text":
return text_reader(is_unicode=True)
return _f
| # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from six import StringIO
from io import BytesIO
from os.path import abspath, join
import amazon.ion.simpleion as ion
import amazon.ion.reader as ion_reader
from amazon.ion.reader_managed import managed_reader
from amazon.ion.reader_text import text_reader
from amazon.ion.reader_binary import binary_reader
from amazon.ion.reader import NEXT_EVENT
from amazon.ion.reader import SKIP_EVENT
from amazon.ion.writer import blocking_writer
from amazon.ion.writer_text import raw_writer
from amazon.ion.core import IonEventType
from amazon.ion.core import IonEvent
from ionhash.hasher import hash_reader
from ionhash.hasher import hash_writer
from ionhash.hasher import HashEvent
from .util import hash_function_provider
from .util import sexp_to_bytearray
def _test_data(algorithm):
path = abspath(join(abspath(__file__), '..', '..', 'ion-hash-test', 'ion_hash_tests.ion'))
f = open(path)
ion_tests = ion.loads(f.read(), single_value=False)
f.close()
def _has_algorithm(ion_test):
return algorithm in ion_test['expect']
return filter(_has_algorithm, ion_tests)
_IVM = "$ion_1_0 "
_IVM_BYTES = [0xE0, 0x01, 0x00, 0xEA]
def _test_name(ion_test):
if len(ion_test.ion_annotations) > 0:
test_name = ion_test.ion_annotations[0].text
else:
test_name = str(ion.dumps(ion_test['ion'], binary=False))
if test_name.startswith(_IVM):
test_name = test_name[len(_IVM):]
return " " + test_name
def _to_buffer(ion_test, binary):
if 'ion' in ion_test:
v = ion.dumps(ion_test['ion'], binary=binary)
if '10n' in ion_test:
v = bytearray(_IVM_BYTES)
for byte in ion_test['10n']:
v.append(byte)
if not binary:
value = ion.load(BytesIO(v))
v = ion.dumps(value, binary=False)
if binary:
return BytesIO(v)
else:
return StringIO(v)
def _consumer_provider(reader_provider, buf):
def _f(algorithm):
buf.seek(0)
reader = hash_reader(
ion_reader.blocking_reader(managed_reader(reader_provider(), None), buf),
hash_function_provider(algorithm, _actual_updates, _actual_digests))
_consume(reader)
return reader.send(HashEvent.DIGEST)
return _f
def _consume(reader, writer=None):
event = reader.send(NEXT_EVENT)
if writer is not None:
writer.send(event)
while event.event_type is not IonEventType.STREAM_END:
event = reader.send(NEXT_EVENT)
if writer is not None:
writer.send(event)
def _writer_provider(reader_provider, buf):
def _f(algorithm):
buf.seek(0)
reader = ion_reader.blocking_reader(managed_reader(reader_provider(), None), buf)
writer = hash_writer(
blocking_writer(raw_writer(), BytesIO()),
hash_function_provider(algorithm, _actual_updates, _actual_digests))
_consume(reader, writer)
digest = writer.send(HashEvent.DIGEST)
writer.send(IonEvent(IonEventType.STREAM_END))
return digest
return _f
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_binary(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("binary"),
_to_buffer(ion_test, binary=True)))
@pytest.mark.parametrize("ion_test", _test_data("md5"), ids=_test_name)
def test_binary_md5(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("binary"),
_to_buffer(ion_test, binary=True)))
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_text(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("text"),
_to_buffer(ion_test, binary=False)))
@pytest.mark.parametrize("ion_test", _test_data("md5"), ids=_test_name)
def test_text_md5(ion_test):
_run_test(ion_test,
_consumer_provider(_reader_provider("text"),
_to_buffer(ion_test, binary=False)))
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_skip_over(ion_test):
buf = _to_buffer(ion_test, binary=True)
def skipping_consumer(algorithm):
buf.seek(0)
reader = hash_reader(
ion_reader.blocking_reader(managed_reader(_reader_provider("binary")(), None), buf),
hash_function_provider(algorithm, _actual_updates, _actual_digests))
event = reader.send(NEXT_EVENT)
while event.event_type != IonEventType.STREAM_END:
if event.event_type == IonEventType.CONTAINER_START:
event = reader.send(SKIP_EVENT)
else:
event = reader.send(NEXT_EVENT)
return reader.send(HashEvent.DIGEST)
_run_test(ion_test, skipping_consumer)
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_writer(ion_test):
_run_test(ion_test,
_writer_provider(_reader_provider("text"),
_to_buffer(ion_test, binary=False)))
@pytest.mark.parametrize("ion_test", _test_data("identity"), ids=_test_name)
def test_simpleion(ion_test):
def to_ion_hash(algorithm):
if 'ion' in ion_test:
value = ion_test['ion']
if '10n' in ion_test:
ba = bytearray(_IVM_BYTES)
for byte in ion_test['10n']:
ba.append(byte)
value = ion.load(BytesIO(ba))
return value.ion_hash(hash_function_provider=hash_function_provider(algorithm,
_actual_updates,
_actual_digests))
_run_test(ion_test, to_ion_hash)
_actual_updates = []
_actual_digests = []
def _run_test(ion_test, digester):
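# Compare the recorded update/digest calls and the returned digest against the expectations for each algorithm.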
expect = ion_test['expect']
for algorithm in expect:
expected_updates = []
expected_digests = []
final_digest = None
for sexp in expect[algorithm]:
annot = sexp.ion_annotations[0].text
if annot == "update":
expected_updates.append(sexp_to_bytearray(sexp))
pass
elif annot == "digest":
expected_digests.append(sexp_to_bytearray(sexp))
elif annot == "final_digest":
final_digest = sexp_to_bytearray(sexp)
_actual_updates.clear()
_actual_digests.clear()
actual_digest_bytes = digester(algorithm)
if len(expected_updates) > 0:
assert _actual_updates == expected_updates
if final_digest is not None:
assert _actual_digests[-1] == final_digest
assert actual_digest_bytes == final_digest
else:
assert _actual_digests == expected_digests
assert actual_digest_bytes == expected_digests[-1]
def _reader_provider(type):
def _f():
if type == "binary":
return binary_reader()
elif type == "text":
return text_reader(is_unicode=True)
return _f | en | 0.874973 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. | 1.802662 | 2 |
lib/FluentDB/helpers/decorator.py | olavoasantos/FluentDB | 2 | 6630121 | from functools import wraps
def decorate_all_functions(function_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
setattr(cls, name, function_decorator(obj))
return cls
return decorator
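# Wraps a method so that optional before<Name>/after<Name> hooks on the instance are called around it.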
def beforeAfterCall(func):
@wraps(func)
def wrapper(*args, **kw):
name = func.__name__
# before
if getattr(args[0], "before{0}".format(name.capitalize()), None):
getattr(args[0], "before{0}".format(name.capitalize()))(*args)
try:
res = func(*args, **kw)
finally:
# After
if getattr(args[0], "after{0}".format(name.capitalize()), None):
getattr(args[0], "after{0}".format(name.capitalize()))(*args)
return res
return wrapper | from functools import wraps
def decorate_all_functions(function_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
setattr(cls, name, function_decorator(obj))
return cls
return decorator
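# Wraps a method so that optional before<Name>/after<Name> hooks on the instance are called around it.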
def beforeAfterCall(func):
@wraps(func)
def wrapper(*args, **kw):
name = func.__name__
# before
if getattr(args[0], "before{0}".format(name.capitalize()), None):
getattr(args[0], "before{0}".format(name.capitalize()))(*args)
try:
res = func(*args, **kw)
finally:
# After
if getattr(args[0], "after{0}".format(name.capitalize()), None):
getattr(args[0], "after{0}".format(name.capitalize()))(*args)
return res
return wrapper | en | 0.914316 | # before # After | 3.479679 | 3 |
ntnui/apps/authentication/views/user.py | kapteinstein/tdt4290 | 0 | 6630122 | <filename>ntnui/apps/authentication/views/user.py
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views import View
from django.contrib.auth import login as auth_login
from authentication.forms import SignUpForm
class UserSignup(View):
''' Returns an html template containing user settings '''
template_name = 'registration/signup.html'
def get(self, request):
form = SignUpForm()
context = {
'form': form
}
return render(request, self.template_name, context)
def post(self, request):
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
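# The backend is named explicitly so auth_login knows which backend authenticated the user when several are configured.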
auth_login(request, user,
backend='django.contrib.auth.backends.ModelBackend')
return redirect('home')
context = {
'form': form
}
return render(request, self.template_name, context)
| <filename>ntnui/apps/authentication/views/user.py
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views import View
from django.contrib.auth import login as auth_login
from authentication.forms import SignUpForm
class UserSignup(View):
''' Returns an html template containing user settings '''
template_name = 'registration/signup.html'
def get(self, request):
form = SignUpForm()
context = {
'form': form
}
return render(request, self.template_name, context)
def post(self, request):
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
auth_login(request, user,
backend='django.contrib.auth.backends.ModelBackend')
return redirect('home')
context = {
'form': form
}
return render(request, self.template_name, context)
| en | 0.521293 | Returns an html template containing user settings | 2.071647 | 2 |
src/opera/parser/tosca/path.py | radon-h2020/xopera-opera | 0 | 6630123 | import pathlib
from .string import String
class Path(String):
@classmethod
def build(cls, yaml_node):
return cls(pathlib.PurePath(yaml_node.value), yaml_node.loc)
def prefix_path(self, parent_path):
if not self.data.is_absolute():
self.data = parent_path / self.data
def resolve_path(self, base_path):
# Absolute path is relative to the CSAR root folder, so we need to
# strip the root off of it.
if self.data.is_absolute():
path = self.data.relative_to(self.data.root)
else:
path = self.data
self.data = self._compact_path(path)
self._validate_path(base_path)
@staticmethod
def _compact_path(path):
# Next loop removes as many path/.. pairs as possible. When the path
# is in its canonical form, it should not start with .. since that
# would mean that something is trying to access paths outside the
# CSAR.
#
# Examples:
# some/path/.. -> some
# ../paths/here -> ../paths/here
# my/../../path -> ../path
pos = 1
parts = list(path.parts)
while pos < len(parts):
if parts[pos] == ".." and parts[pos - 1] != "..":
del parts[pos]
del parts[pos - 1]
pos = max(pos - 1, 1)
else:
pos += 1
return pathlib.PurePath(*parts)
def _validate_path(self, base_path):
# Abstract checks
if str(self.data) == ".":
self.abort("Path points to the CSAR root.", self.loc)
if self.data.parts[0] == "..":
self.abort("Path points outside the CSAR.", self.loc)
# Concrete checks
abs_path = base_path / self.data
if not abs_path.exists():
self.abort("Path {} does not exist.".format(abs_path), self.loc)
# We test for symlinks separately since is_dir() and is_file() return
# True on symlinks and this is not what we want.
if abs_path.is_symlink():
self.abort("Path {} is a symlink.".format(abs_path), self.loc)
if not abs_path.is_dir() and not abs_path.is_file():
self.abort("Path {} is not file or folder.".format(abs_path), self.loc)
| import pathlib
from .string import String
class Path(String):
@classmethod
def build(cls, yaml_node):
return cls(pathlib.PurePath(yaml_node.value), yaml_node.loc)
def prefix_path(self, parent_path):
if not self.data.is_absolute():
self.data = parent_path / self.data
def resolve_path(self, base_path):
# Absolute path is relative to the CSAR root folder, so we need to
# strip the root off of it.
if self.data.is_absolute():
path = self.data.relative_to(self.data.root)
else:
path = self.data
self.data = self._compact_path(path)
self._validate_path(base_path)
@staticmethod
def _compact_path(path):
# Next loop removes as many path/.. pairs as possible. When the path
# is in its canonical form, it should not start with .. since that
# would mean that something is trying to access paths outside the
# CSAR.
#
# Examples:
# some/path/.. -> some
# ../paths/here -> ../paths/here
# my/../../path -> ../path
pos = 1
parts = list(path.parts)
while pos < len(parts):
if parts[pos] == ".." and parts[pos - 1] != "..":
del parts[pos]
del parts[pos - 1]
pos = max(pos - 1, 1)
else:
pos += 1
return pathlib.PurePath(*parts)
def _validate_path(self, base_path):
# Abstract checks
if str(self.data) == ".":
self.abort("Path points to the CSAR root.", self.loc)
if self.data.parts[0] == "..":
self.abort("Path points outside the CSAR.", self.loc)
# Concrete checks
abs_path = base_path / self.data
if not abs_path.exists():
self.abort("Path {} does not exist.".format(abs_path), self.loc)
# We test for symlinks separately since is_dir() and is_file() return
# True on symlinks and this is not what we want.
if abs_path.is_symlink():
self.abort("Path {} is a symlink.".format(abs_path), self.loc)
if not abs_path.is_dir() and not abs_path.is_file():
self.abort("Path {} is not file or folder.".format(abs_path), self.loc)
| en | 0.941428 | # Absolute path is relative to the CSAR root folder, so we need to # strip the root off of it. # Next loop removes as many path/.. pairs as possible. When the path # is in its canonical form, it should not start with .. since that # would mean that something is trying to access paths outside the # CSAR. # # Examples: # some/path/.. -> some # ../paths/here -> ../paths/here # my/../../path -> ../path # Abstract checks # Concrete checks # We test for symlinks separately since is_dir() and is_file() return # True on symlinks and this is not what we want. | 2.828253 | 3 |
datahub/metadata/test/test_admin.py | Staberinde/data-hub-api | 6 | 6630124 | from unittest.mock import Mock
import factory
import pytest
from django.contrib.admin import site
from django.test import RequestFactory
from datahub.metadata.admin import ServiceAdmin
from datahub.metadata.models import Service
from datahub.metadata.test.factories import ServiceFactory
@pytest.mark.django_db
class TestServiceAdmin:
"""Tests for ServiceAdmin."""
@pytest.mark.parametrize('context', (Service.Context.INTERACTION, Service.Context.EVENT))
def test_context_filter(self, context):
"""Tests filtering by context."""
test_data_contexts = (
[Service.Context.INTERACTION],
[Service.Context.SERVICE_DELIVERY],
[Service.Context.INTERACTION, Service.Context.SERVICE_DELIVERY],
)
ServiceFactory.create_batch(
len(test_data_contexts),
contexts=factory.Iterator(test_data_contexts),
)
model_admin = ServiceAdmin(Service, site)
request_factory = RequestFactory()
request = request_factory.get(
'/',
data={'context': context},
)
request.user = Mock()
change_list = model_admin.get_changelist_instance(request)
actual_services = list(change_list.get_queryset(request))
service_count_for_context = Service.objects.filter(contexts__overlap=[context]).count()
assert len(actual_services) == service_count_for_context
assert all(context in service.contexts for service in actual_services)
def test_no_filter(self):
"""Test that if no filter is selected, all services are returned."""
ServiceFactory.create_batch(5)
model_admin = ServiceAdmin(Service, site)
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = Mock()
change_list = model_admin.get_changelist_instance(request)
actual_services = change_list.get_queryset(request)
assert actual_services.count() == Service.objects.count()
| from unittest.mock import Mock
import factory
import pytest
from django.contrib.admin import site
from django.test import RequestFactory
from datahub.metadata.admin import ServiceAdmin
from datahub.metadata.models import Service
from datahub.metadata.test.factories import ServiceFactory
@pytest.mark.django_db
class TestServiceAdmin:
"""Tests for ServiceAdmin."""
@pytest.mark.parametrize('context', (Service.Context.INTERACTION, Service.Context.EVENT))
def test_context_filter(self, context):
"""Tests filtering by context."""
test_data_contexts = (
[Service.Context.INTERACTION],
[Service.Context.SERVICE_DELIVERY],
[Service.Context.INTERACTION, Service.Context.SERVICE_DELIVERY],
)
ServiceFactory.create_batch(
len(test_data_contexts),
contexts=factory.Iterator(test_data_contexts),
)
model_admin = ServiceAdmin(Service, site)
request_factory = RequestFactory()
request = request_factory.get(
'/',
data={'context': context},
)
request.user = Mock()
change_list = model_admin.get_changelist_instance(request)
actual_services = list(change_list.get_queryset(request))
service_count_for_context = Service.objects.filter(contexts__overlap=[context]).count()
assert len(actual_services) == service_count_for_context
assert all(context in service.contexts for service in actual_services)
def test_no_filter(self):
"""Test that if no filter is selected, all services are returned."""
ServiceFactory.create_batch(5)
model_admin = ServiceAdmin(Service, site)
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = Mock()
change_list = model_admin.get_changelist_instance(request)
actual_services = change_list.get_queryset(request)
assert actual_services.count() == Service.objects.count()
| en | 0.938628 | Tests for ServiceAdmin. Tests filtering by context. Test that if no filter is selected, all services are returned. | 2.160292 | 2 |
gestao/recursos_humanos/models/basico/Formacao.py | Smartboxweb98/gestao_empresarial | 3 | 6630125 | # -*- coding: utf-8 -*-
from django.db import models
from gestao.basico.models.formacao.Curso import Curso
from gestao.recursos_humanos.models.funcionario.Funcionario import Funcionario
class Formacao(models.Model):
funcionario = models.ForeignKey(Funcionario, verbose_name="Funcionário")
curso = models.ForeignKey(Curso, verbose_name="Curso")
ano = models.IntegerField(verbose_name="Ano de Formação")
def __unicode__(self):
return u' %s : %s (%s)' % (self.funcionario.nome_completo, self.curso, self.ano)
class Meta:
app_label = 'recursos_humanos'
verbose_name = 'Formação'
verbose_name_plural = 'Formações'
| # -*- coding: utf-8 -*-
from django.db import models
from gestao.basico.models.formacao.Curso import Curso
from gestao.recursos_humanos.models.funcionario.Funcionario import Funcionario
class Formacao(models.Model):
funcionario = models.ForeignKey(Funcionario, verbose_name="Funcionário")
curso = models.ForeignKey(Curso, verbose_name="Curso")
ano = models.IntegerField(verbose_name="Ano de Formação")
def __unicode__(self):
return u' %s : %s (%s)' % (self.funcionario.nome_completo, self.curso, self.ano)
class Meta:
app_label = 'recursos_humanos'
verbose_name = 'Formação'
verbose_name_plural = 'Formações'
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.19984 | 2 |
src/handlers/simulation_handler/base_simulation_handler.py | superdurszlak/SubroutineTest | 14 | 6630126 | <reponame>superdurszlak/SubroutineTest
import abc
from Tkconstants import E, W
from Tkinter import Label, Entry
import config
from src.utils import is_positive_float
class BaseSimulationHandler:
"""
Base class for all simulation handlers
"""
def __init__(self):
self._validator = None
def populate(self, frame):
"""
Clear frame object and populate it with handler's controls
:param frame: Frame object to which UI controls will be bound
:return: None
"""
for child in frame.winfo_children():
child.destroy()
self._validator = frame.register(is_positive_float)
self._populate(frame)
@abc.abstractmethod
def _populate(self, frame):
"""
Class-specific inner implementation of populate method
:param frame: Frame object to which UI controls will be bound
:return: None
"""
pass
@abc.abstractproperty
def parameters(self):
"""
Dictionary of parameters relevant for this handler and possibly nested handlers
:return: dictionary of parameters
"""
pass
@abc.abstractproperty
def builders(self):
"""
Get all builders relevant for this handler and possibly nested handlers
:return: list of builders, with each builder tied to previous one as 'next_builder'
"""
pass
@staticmethod
def _create_entry_line(variable, name, unit, frame, row_index, validator):
"""
Create label and entry for given variable
:param variable: DoubleVar that has to be handled
:param name: visible name of entry
:param unit: unit of variable
:param frame: parent frame for entry
:param row_index: grid row in which the entry will be placed
:param validator: Value validator
:return: None
"""
if name is not None:
variable_label = Label(frame, text=name)
variable_label.grid(column=0, row=row_index, sticky=E, padx=config.ELEMENT_PADDING,
pady=config.ELEMENT_PADDING)
if validator is not None:
variable_entry = Entry(frame, textvariable=variable, validate='focusout', validatecommand=validator)
else:
variable_entry = Entry(frame, textvariable=variable)
variable_entry.grid(column=1, row=row_index, sticky=W, padx=config.ELEMENT_PADDING, pady=config.ELEMENT_PADDING)
if unit is not None:
variable_unit = Label(frame, text=unit)
variable_unit.grid(column=2, row=row_index, sticky=W, padx=config.ELEMENT_PADDING,
pady=config.ELEMENT_PADDING)
| import abc
from Tkconstants import E, W
from Tkinter import Label, Entry
import config
from src.utils import is_positive_float
class BaseSimulationHandler:
"""
Base class for all simulation handlers
"""
def __init__(self):
self._validator = None
def populate(self, frame):
"""
Clear frame object and populate it with handler's controls
:param frame: Frame object to which UI controls will be bound
:return: None
"""
for child in frame.winfo_children():
child.destroy()
self._validator = frame.register(is_positive_float)
self._populate(frame)
@abc.abstractmethod
def _populate(self, frame):
"""
Class-specific inner implementation of populate method
:param frame: Frame object to which UI controls will be bound
:return: None
"""
pass
@abc.abstractproperty
def parameters(self):
"""
Dictionary of parameters relevant for this handler and possibly nested handlers
:return: dictionary of parameters
"""
pass
@abc.abstractproperty
def builders(self):
"""
Get all builders relevant for this handler and possibly nested handlers
:return: list of builders, with each builder tied to previous one as 'next_builder'
"""
pass
@staticmethod
def _create_entry_line(variable, name, unit, frame, row_index, validator):
"""
Create label and entry for given variable
:param variable: DoubleVar that has to be handled
:param name: visible name of entry
:param unit: unit of variable
:param frame: parent frame for entry
:param row_index: grid row in which the entry will be placed
:param validator: Value validator
:return: None
"""
if name is not None:
variable_label = Label(frame, text=name)
variable_label.grid(column=0, row=row_index, sticky=E, padx=config.ELEMENT_PADDING,
pady=config.ELEMENT_PADDING)
if validator is not None:
variable_entry = Entry(frame, textvariable=variable, validate='focusout', validatecommand=validator)
else:
variable_entry = Entry(frame, textvariable=variable)
variable_entry.grid(column=1, row=row_index, sticky=W, padx=config.ELEMENT_PADDING, pady=config.ELEMENT_PADDING)
if unit is not None:
variable_unit = Label(frame, text=unit)
variable_unit.grid(column=2, row=row_index, sticky=W, padx=config.ELEMENT_PADDING,
pady=config.ELEMENT_PADDING) | en | 0.790115 | Base class for all simulation handlers Clear frame object and populate it with handler's controls :param frame: Frame object to which UI controls will be bound :return: None Class-specific inner implementation of populate method :param frame: Frame object to which UI controls will be bound :return: None Dictionary of parameters relevant for this handler and possibly nested handlers :return: dictionary of parameters Get all builders relevant for this handler and possibly nested handlers :return: list of builders, with each builder tied to previous one as 'next_builder' Create label and entry for given variable :param variable: DoubleVar that has to be handled :param name: visible name of entry :param unit: unit of variable :param frame: parent frame for entry :param row_index: grid row in which the entry will be placed :param validator: Value validator :return: None | 3.234721 | 3 |
bot.py | w7cep/Froakie | 1 | 6630127 | <filename>bot.py<gh_stars>1-10
import os
import aiohttp
import nextcord
import nextcord.ext
from nextcord.ext import commands, tasks
import platform
import config
# TODO: #1 Fine tune command permissions.
# TODO: #2 Delete every excess space / Convert files with space indents to tabs.
def main():
# allows privledged intents for monitoring members joining, roles editing, and role assignments
intents = nextcord.Intents.all()
activity = nextcord.Activity(type=nextcord.ActivityType.listening, name=f"{config.BOT_STATUS}")
bot = commands.Bot(command_prefix=config.PREFIX, intents=intents, activity=activity)
# boolean that will be set to true when views are added
bot.persistent_views_added = False
@bot.event
async def on_ready():
member_count = 0
guild_string = ""
for g in bot.guilds:
guild_string += f"{g.name} - {g.id} - Members: {g.member_count}\n"
member_count += g.member_count
print(f"Bot: '{bot.user.name}' has connected to Discord, active on {len(bot.guilds)} guilds:\n{guild_string}")
print(f"Nextcord API version: {nextcord.__version__}")
print(f"Python version: {platform.python_version()}")
print(f"Running on: {platform.system()} {platform.release()} ({os.name})")
channel = bot.get_channel(907496711872729128) # Gets channel from internal cache
await channel.send(f"{bot.user.name} is connected to {len(bot.guilds)} guilds:\n{guild_string}\nNextcord API version: {nextcord.__version__}\nPython version: {platform.python_version()}\nRunning on: {platform.system()} {platform.release()} ({os.name})") # Sends message to channel
# load all cogs
for folder in os.listdir("cogs"):
if os.path.exists(os.path.join("cogs", folder, "cog.py")):
bot.load_extension(f"cogs.{folder}.cog")
async def startup():
bot.session = aiohttp.ClientSession()
bot.loop.create_task(startup())
# run the bot
bot.run(config.BOT_TOKEN)
if __name__ == "__main__":
main()
| <filename>bot.py<gh_stars>1-10
import os
import aiohttp
import nextcord
import nextcord.ext
from nextcord.ext import commands, tasks
import platform
import config
# TODO: #1 Fine tune command permissions.
# TODO: #2 Delete every excess space / Convert files with space indents to tabs.
def main():
# allows privileged intents for monitoring members joining, roles editing, and role assignments
intents = nextcord.Intents.all()
activity = nextcord.Activity(type=nextcord.ActivityType.listening, name=f"{config.BOT_STATUS}")
bot = commands.Bot(command_prefix=config.PREFIX, intents=intents, activity=activity)
# boolean that will be set to true when views are added
bot.persistent_views_added = False
@bot.event
async def on_ready():
member_count = 0
guild_string = ""
for g in bot.guilds:
guild_string += f"{g.name} - {g.id} - Members: {g.member_count}\n"
member_count += g.member_count
print(f"Bot: '{bot.user.name}' has connected to Discord, active on {len(bot.guilds)} guilds:\n{guild_string}")
print(f"Nextcord API version: {nextcord.__version__}")
print(f"Python version: {platform.python_version()}")
print(f"Running on: {platform.system()} {platform.release()} ({os.name})")
channel = bot.get_channel(907496711872729128) # Gets channel from internal cache
await channel.send(f"{bot.user.name} is connected to {len(bot.guilds)} guilds:\n{guild_string}\nNextcord API version: {nextcord.__version__}\nPython version: {platform.python_version()}\nRunning on: {platform.system()} {platform.release()} ({os.name})") # Sends message to channel
# load all cogs
for folder in os.listdir("cogs"):
if os.path.exists(os.path.join("cogs", folder, "cog.py")):
bot.load_extension(f"cogs.{folder}.cog")
async def startup():
bot.session = aiohttp.ClientSession()
bot.loop.create_task(startup())
# run the bot
bot.run(config.BOT_TOKEN)
if __name__ == "__main__":
main()
| en | 0.821011 | # TODO: #1 Fine tune command permissions. # TODO: #2 Delete every excess space / Convert files with space indents to tabs. # allows privledged intents for monitoring members joining, roles editing, and role assignments # boolean that will be set to true when views are added # Gets channel from internal cache # Sends message to channel # load all cogs # run the bot | 2.443515 | 2 |
test/conftest.py | mkauf/mod_md | 0 | 6630128 | <filename>test/conftest.py
from TestEnv import TestEnv
def pytest_report_header(config, startdir):
TestEnv.init()
return "mod_md: {version} [apache: {aversion}({prefix}), mod_{ssl}]".format(
version=TestEnv.A2MD_VERSION,
prefix=TestEnv.PREFIX,
aversion=TestEnv.get_httpd_version(),
ssl=TestEnv.get_ssl_module(),
) | <filename>test/conftest.py
from TestEnv import TestEnv
def pytest_report_header(config, startdir):
TestEnv.init()
return "mod_md: {version} [apache: {aversion}({prefix}), mod_{ssl}]".format(
version=TestEnv.A2MD_VERSION,
prefix=TestEnv.PREFIX,
aversion=TestEnv.get_httpd_version(),
ssl=TestEnv.get_ssl_module(),
) | none | 1 | 1.833969 | 2 |
|
sandbox_request/config.py | ghga-de/sandbox-request | 0 | 6630129 | <reponame>ghga-de/sandbox-request<gh_stars>0
# Copyright 2021 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides Configuration for the API
"""
from typing import Dict
from functools import lru_cache
from ghga_service_chassis_lib.config import config_from_yaml
from ghga_service_chassis_lib.api import ApiConfigBase
@config_from_yaml(prefix="sandbox_request")
class Config(ApiConfigBase):
"""
Config class that extends ghga_service_chassis_lib.api.ApiConfigBase
"""
# config parameter needed for the api server
# are inherited from ApiConfigBase
# additional parameters will go here:
db_url: str = "mongodb://localhost:27017"
db_name: str = "sandbox_request_db"
fastapi_options: Dict = {
"root_path": "/",
"openapi_url": "/openapi.json",
"docs_url": "/docs",
}
svc_metadata_url: str
rabbitmq_host: str = "rabbitmq"
rabbitmq_port: int = 5672
topic_name_download_requested: str = "download_request"
topic_name_send_notification: str = "send_notification"
data_requester_email: str
data_requester_name: str = "<NAME>"
data_steward_email: str
data_steward_name: str = "<NAME>"
@lru_cache
def get_config():
"""
Get the Config object that encapsulates all the
configuration for this application.
"""
return Config()
| # Copyright 2021 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides Configuration for the API
"""
from typing import Dict
from functools import lru_cache
from ghga_service_chassis_lib.config import config_from_yaml
from ghga_service_chassis_lib.api import ApiConfigBase
@config_from_yaml(prefix="sandbox_request")
class Config(ApiConfigBase):
"""
Config class that extends ghga_service_chassis_lib.api.ApiConfigBase
"""
# config parameter needed for the api server
# are inherited from ApiConfigBase
# additional parameters will go here:
db_url: str = "mongodb://localhost:27017"
db_name: str = "sandbox_request_db"
fastapi_options: Dict = {
"root_path": "/",
"openapi_url": "/openapi.json",
"docs_url": "/docs",
}
svc_metadata_url: str
rabbitmq_host: str = "rabbitmq"
rabbitmq_port: int = 5672
topic_name_download_requested: str = "download_request"
topic_name_send_notification: str = "send_notification"
data_requester_email: str
data_requester_name: str = "<NAME>"
data_steward_email: str
data_steward_name: str = "<NAME>"
@lru_cache
def get_config():
"""
Get the Config object that encapsulates all the
configuration for this application.
"""
return Config() | en | 0.738792 | # Copyright 2021 Universität Tübingen, DKFZ and EMBL # for the German Human Genome-Phenome Archive (GHGA) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This module provides Configuration for the API Config class that extends ghga_service_chassis_lib.api.ApiConfigBase # config parameter needed for the api server # are inherited from ApiConfigBase # additional parameters will go here: Get the Config object that encapsulates all the configuration for this application. | 1.87253 | 2 |
dyros_jet_smach/scripts/mini_drc_mission_toplevel.py | Junhyung-Kim/dyros_jet | 2 | 6630130 | <reponame>Junhyung-Kim/dyros_jet<filename>dyros_jet_smach/scripts/mini_drc_mission_toplevel.py<gh_stars>1-10
#!/usr/bin/env python
import roslib; roslib.load_manifest('dyros_jet_smach')
import rospy
from smach import StateMachine
import smach_ros
import smach
from dyros_jet_msgs.msg import JointControlAction, JointControlGoal
import rt_dynamixel_msgs.srv
from actionlib import *
from actionlib_msgs.msg import *
from smach_ros import SimpleActionState
from smach_ros import ServiceState
from std_msgs.msg import String
class StringTransitionState(smach.State):
topic=''
def __init__(self, topic, outcomes=[], input_keys=[], output_keys=[]):
self._topic = topic
smach.State.__init__(self, outcomes, input_keys, output_keys)
def execute(self, userdata):
print(self._topic)
while True:
print('wait for message')
trans_tag = rospy.wait_for_message(self._topic,String)
print(trans_tag.data)
if trans_tag.data in self._outcomes:
return trans_tag.data
def main():
rospy.init_node('mini_drc_toplevel')
topic_name = '/dyros_jet/smach/transition'
joint_init_goal = JointControlGoal()
joint_init_goal.command.name = ['L_HipYaw','L_HipRoll','L_HipPitch','L_KneePitch','L_AnklePitch','L_AnkleRoll','R_HipYaw','R_HipRoll','R_HipPitch','R_KneePitch','R_AnklePitch','R_AnkleRoll','WaistPitch','WaistYaw', 'L_ShoulderPitch','L_ShoulderRoll','L_ShoulderYaw','L_ElbowRoll','L_WristYaw','L_WristRoll','L_HandYaw', 'R_ShoulderPitch','R_ShoulderRoll','R_ShoulderYaw','R_ElbowRoll','R_WristYaw','R_WristRoll','R_HandYaw','HeadYaw', 'HeadPitch', 'R_Gripper', 'L_Gripper']
#msg.enable = [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False]
joint_init_goal.command.position = [0 , 0.034906585 , -0.034906585 , 0.733038285 , -0.6981317 , -0.034906585, 0 , -0.034906585 , 0.0349065850 , -0.733038285 , 0.6981317 , 0.034906585, 0 , 0, 0.6981317008 , -1.6580627893 , -1.3962634016 , -1.9198621771 , 0 , -1.2217304764 , -0.1745329252, -0.6981317008 , 1.6580627893 , 1.3962634016 , 1.9198621771 , 0 , 1.2217304764 , 0.17453292519, 0 , 0 , 0 , 0]
joint_init_goal.command.duration = [3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 0 , 0 , 0 , 0]
# Construct state machine
mini_drc_sm = StateMachine(
outcomes=['finished','aborted','preempted'])
#run_mode = rospy.get_param('~run_mode', 'simulation')
run_mode = rospy.get_param('~run_mode', 'real_robot')
print(run_mode)
# Set the initial state explicitly
if run_mode == 'simulation':
mini_drc_sm.set_initial_state(['READY'])
elif run_mode == 'mujoco':
mini_drc_sm.set_initial_state(['READY'])
elif run_mode == 'real_robot':
mini_drc_sm.set_initial_state(['POWER_OFF'])
else:
print ("unknown mode")
#request = rt_dynamixel_msgs.srv.ModeSettingRequest(rt_dynamixel_msgs.srv.ModeSettingRequest.SETTING)
#print(request)
#request = rt_dynamixel_msgs.srv.MotorSettingRequest(
#mode=rt_dynamixel_msgs.srv.MotorSettingRequest.SET_TORQUE_ENABLE,value=1)
#print(request)
with mini_drc_sm:
StateMachine.add('POWER_OFF',
StringTransitionState(topic_name, outcomes=['power_on']),
{'power_on':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('SET_DXL_MODE_SETTING_MODE',
ServiceState('/rt_dynamixel/mode', rt_dynamixel_msgs.srv.ModeSetting,
request = rt_dynamixel_msgs.srv.ModeSettingRequest(
rt_dynamixel_msgs.srv.ModeSettingRequest.SETTING)),
transitions={'succeeded':'SET_DXL_TORQUE_ON', 'aborted':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('SET_DXL_TORQUE_ON',
ServiceState('/rt_dynamixel/motor_set', rt_dynamixel_msgs.srv.MotorSetting,
request = rt_dynamixel_msgs.srv.MotorSettingRequest(
mode=rt_dynamixel_msgs.srv.MotorSettingRequest.SET_TORQUE_ENABLE,value=1)),
transitions={'succeeded':'SET_DXL_SYNC_DRIVE_ON', 'aborted':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('SET_DXL_SYNC_DRIVE_ON',
ServiceState('/rt_dynamixel/mode', rt_dynamixel_msgs.srv.ModeSetting,
request = rt_dynamixel_msgs.srv.ModeSettingRequest(
rt_dynamixel_msgs.srv.ModeSettingRequest.CONTROL_RUN)),
transitions={'succeeded':'READY', 'aborted':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('READY',
StringTransitionState(topic_name, outcomes=['initialize_pose']),
transitions={'initialize_pose':'SET_INIT_POSITION'})
StateMachine.add('SET_INIT_POSITION',
SimpleActionState('/dyros_jet/joint_control', JointControlAction, goal=joint_init_goal),
transitions={'succeeded':'READY_TO_MOVE'})
StateMachine.add('READY_TO_MOVE',
StringTransitionState(topic_name, outcomes=['Mot1']),
transitions={'Mot1':'Motion1'})
StateMachine.add('Motion1',
StringTransitionState(topic_name, outcomes=['Mot2']),
transitions={'Mot2':'Motion2'})
StateMachine.add('Motion2',
StringTransitionState(topic_name, outcomes=['stair', 'door', 'initialize_pose1']),
transitions={'stair':'finished', 'door':'finished', 'initialize_pose1':'SET_INIT_POSITION'})
# Run state machine introspection server
intro_server = smach_ros.IntrospectionServer('dyros_jet',mini_drc_sm,'/MINI_DRC')
intro_server.start()
mini_drc_sm.execute()
rospy.spin()
intro_server.stop()
rospy.signal_shutdown('All done.')
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import roslib; roslib.load_manifest('dyros_jet_smach')
import rospy
from smach import StateMachine
import smach_ros
import smach
from dyros_jet_msgs.msg import JointControlAction, JointControlGoal
import rt_dynamixel_msgs.srv
from actionlib import *
from actionlib_msgs.msg import *
from smach_ros import SimpleActionState
from smach_ros import ServiceState
from std_msgs.msg import String
class StringTransitionState(smach.State):
topic=''
def __init__(self, topic, outcomes=[], input_keys=[], output_keys=[]):
self._topic = topic
smach.State.__init__(self, outcomes, input_keys, output_keys)
def execute(self, userdata):
print(self._topic)
while True:
print('wait for message')
trans_tag = rospy.wait_for_message(self._topic,String)
print(trans_tag.data)
if trans_tag.data in self._outcomes:
return trans_tag.data
def main():
rospy.init_node('mini_drc_toplevel')
topic_name = '/dyros_jet/smach/transition'
joint_init_goal = JointControlGoal()
joint_init_goal.command.name = ['L_HipYaw','L_HipRoll','L_HipPitch','L_KneePitch','L_AnklePitch','L_AnkleRoll','R_HipYaw','R_HipRoll','R_HipPitch','R_KneePitch','R_AnklePitch','R_AnkleRoll','WaistPitch','WaistYaw', 'L_ShoulderPitch','L_ShoulderRoll','L_ShoulderYaw','L_ElbowRoll','L_WristYaw','L_WristRoll','L_HandYaw', 'R_ShoulderPitch','R_ShoulderRoll','R_ShoulderYaw','R_ElbowRoll','R_WristYaw','R_WristRoll','R_HandYaw','HeadYaw', 'HeadPitch', 'R_Gripper', 'L_Gripper']
#msg.enable = [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False]
joint_init_goal.command.position = [0 , 0.034906585 , -0.034906585 , 0.733038285 , -0.6981317 , -0.034906585, 0 , -0.034906585 , 0.0349065850 , -0.733038285 , 0.6981317 , 0.034906585, 0 , 0, 0.6981317008 , -1.6580627893 , -1.3962634016 , -1.9198621771 , 0 , -1.2217304764 , -0.1745329252, -0.6981317008 , 1.6580627893 , 1.3962634016 , 1.9198621771 , 0 , 1.2217304764 , 0.17453292519, 0 , 0 , 0 , 0]
joint_init_goal.command.duration = [3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 0 , 0 , 0 , 0]
# Construct state machine
mini_drc_sm = StateMachine(
outcomes=['finished','aborted','preempted'])
#run_mode = rospy.get_param('~run_mode', 'simulation')
run_mode = rospy.get_param('~run_mode', 'real_robot')
print(run_mode)
# Set the initial state explicitly
if run_mode == 'simulation':
mini_drc_sm.set_initial_state(['READY'])
elif run_mode == 'mujoco':
mini_drc_sm.set_initial_state(['READY'])
elif run_mode == 'real_robot':
mini_drc_sm.set_initial_state(['POWER_OFF'])
else:
print ("unknown mode")
#request = rt_dynamixel_msgs.srv.ModeSettingRequest(rt_dynamixel_msgs.srv.ModeSettingRequest.SETTING)
#print(request)
#request = rt_dynamixel_msgs.srv.MotorSettingRequest(
#mode=rt_dynamixel_msgs.srv.MotorSettingRequest.SET_TORQUE_ENABLE,value=1)
#print(request)
with mini_drc_sm:
StateMachine.add('POWER_OFF',
StringTransitionState(topic_name, outcomes=['power_on']),
{'power_on':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('SET_DXL_MODE_SETTING_MODE',
ServiceState('/rt_dynamixel/mode', rt_dynamixel_msgs.srv.ModeSetting,
request = rt_dynamixel_msgs.srv.ModeSettingRequest(
rt_dynamixel_msgs.srv.ModeSettingRequest.SETTING)),
transitions={'succeeded':'SET_DXL_TORQUE_ON', 'aborted':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('SET_DXL_TORQUE_ON',
ServiceState('/rt_dynamixel/motor_set', rt_dynamixel_msgs.srv.MotorSetting,
request = rt_dynamixel_msgs.srv.MotorSettingRequest(
mode=rt_dynamixel_msgs.srv.MotorSettingRequest.SET_TORQUE_ENABLE,value=1)),
transitions={'succeeded':'SET_DXL_SYNC_DRIVE_ON', 'aborted':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('SET_DXL_SYNC_DRIVE_ON',
ServiceState('/rt_dynamixel/mode', rt_dynamixel_msgs.srv.ModeSetting,
request = rt_dynamixel_msgs.srv.ModeSettingRequest(
rt_dynamixel_msgs.srv.ModeSettingRequest.CONTROL_RUN)),
transitions={'succeeded':'READY', 'aborted':'SET_DXL_MODE_SETTING_MODE'})
StateMachine.add('READY',
StringTransitionState(topic_name, outcomes=['initialize_pose']),
transitions={'initialize_pose':'SET_INIT_POSITION'})
StateMachine.add('SET_INIT_POSITION',
SimpleActionState('/dyros_jet/joint_control', JointControlAction, goal=joint_init_goal),
transitions={'succeeded':'READY_TO_MOVE'})
StateMachine.add('READY_TO_MOVE',
StringTransitionState(topic_name, outcomes=['Mot1']),
transitions={'Mot1':'Motion1'})
StateMachine.add('Motion1',
StringTransitionState(topic_name, outcomes=['Mot2']),
transitions={'Mot2':'Motion2'})
StateMachine.add('Motion2',
StringTransitionState(topic_name, outcomes=['stair', 'door', 'initialize_pose1']),
transitions={'stair':'finished', 'door':'finished', 'initialize_pose1':'SET_INIT_POSITION'})
# Run state machine introspection server
intro_server = smach_ros.IntrospectionServer('dyros_jet',mini_drc_sm,'/MINI_DRC')
intro_server.start()
mini_drc_sm.execute()
rospy.spin()
intro_server.stop()
rospy.signal_shutdown('All done.')
if __name__ == '__main__':
main() | en | 0.349935 | #!/usr/bin/env python #msg.enable = [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False] # Construct state machine #run_mode = rospy.get_param('~run_mode', 'simulation') # Set the initial state explicitly #request = rt_dynamixel_msgs.srv.ModeSettingRequest(rt_dynamixel_msgs.srv.ModeSettingRequest.SETTING) #print(request) #request = rt_dynamixel_msgs.srv.MotorSettingRequest( #mode=rt_dynamixel_msgs.srv.MotorSettingRequest.SET_TORQUE_ENABLE,value=1) #print(request) # Run state machine introspection server | 2.094778 | 2 |
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/production.py | obswork/rightstart | 0 | 6630131 | from .base import *
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = get_env_variable("SECRET_KEY")
# Compress static files offline
# http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
ALLOWED_HOSTS = [get_env_variable("HOST_NAME"), ]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": get_env_variable("DB_NAME"),
"USER": get_env_variable("DB_USER"),
"PASSWORD": get_env_variable("DB_PASSWD"),
"HOST": get_env_variable("DB_HOST"),
}
}
INSTALLED_APPS += (
"wagtail.contrib.wagtailfrontendcache",
'gunicorn',
)
# support opbeat
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
WAGTAIL_SITE_NAME = '{{ cookiecutter.project_name }}'
# Send notification emails as a background task using Celery,
# to prevent this from blocking web server threads
# (requires the django-celery package):
# http://celery.readthedocs.org/en/latest/configuration.html
# import djcelery
#
# djcelery.setup_loader()
#
# CELERY_SEND_TASK_ERROR_EMAILS = True
# BROKER_URL = 'redis://'
# Use Redis as the cache backend for extra performance
# (requires the django-redis-cache package):
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache
CACHES = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
'KEY_PREFIX': '{{ cookiecutter.repo_name }}',
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
}
}
DEFAULT_FROM_EMAIL = get_env_variable('EMAIL_FROM')
EMAIL_USE_TLS = True
EMAIL_HOST = get_env_variable('EMAIL_HOST')
EMAIL_HOST_USER = get_env_variable('EMAIL_USER')
EMAIL_HOST_PASSWORD = get_env_variable('EMAIL_PASSWD')
EMAIL_PORT = 587
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
},
}
| from .base import *
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = get_env_variable("SECRET_KEY")
# Compress static files offline
# http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
ALLOWED_HOSTS = [get_env_variable("HOST_NAME"), ]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": get_env_variable("DB_NAME"),
"USER": get_env_variable("DB_USER"),
"PASSWORD": get_env_variable("DB_PASSWD"),
"HOST": get_env_variable("DB_HOST"),
}
}
INSTALLED_APPS += (
"wagtail.contrib.wagtailfrontendcache",
'gunicorn',
)
# support opbeat
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
WAGTAIL_SITE_NAME = '{{ cookiecutter.project_name }}'
# Send notification emails as a background task using Celery,
# to prevent this from blocking web server threads
# (requires the django-celery package):
# http://celery.readthedocs.org/en/latest/configuration.html
# import djcelery
#
# djcelery.setup_loader()
#
# CELERY_SEND_TASK_ERROR_EMAILS = True
# BROKER_URL = 'redis://'
# Use Redis as the cache backend for extra performance
# (requires the django-redis-cache package):
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache
CACHES = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
'KEY_PREFIX': '{{ cookiecutter.repo_name }}',
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
}
}
DEFAULT_FROM_EMAIL = get_env_variable('EMAIL_FROM')
EMAIL_USE_TLS = True
EMAIL_HOST = get_env_variable('EMAIL_HOST')
EMAIL_HOST_USER = get_env_variable('EMAIL_USER')
EMAIL_HOST_PASSWORD = get_env_variable('EMAIL_PASSWD')
EMAIL_PORT = 587
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
},
}
| en | 0.585869 | # Compress static files offline # http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE # support opbeat # Send notification emails as a background task using Celery, # to prevent this from blocking web server threads # (requires the django-celery package): # http://celery.readthedocs.org/en/latest/configuration.html # import djcelery # # djcelery.setup_loader() # # CELERY_SEND_TASK_ERROR_EMAILS = True # BROKER_URL = 'redis://' # Use Redis as the cache backend for extra performance # (requires the django-redis-cache package): # http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache # Logging | 1.756023 | 2 |
apps/project/views/task.py | youssriaboelseod/pyerp | 115 | 6630132 | # Django Library
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
# Localfolder Library
from ..models.task import PyTask
TASK_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Estado', 'field': 'state'},
{'string': 'Proyecto', 'field': 'project_id'},
{'string': 'Notas', 'field': 'note'},
]
TASK_FIELDS_SHORT = ['name','state','project_id','note']
class TaskListView(LoginRequiredMixin, ListView):
model = PyTask
template_name = 'base/list.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskListView, self).get_context_data(**kwargs)
context['title'] = 'Tareas'
context['detail_url'] = 'project:task-detail'
context['add_url'] = 'project:task-add'
context['fields'] = TASK_FIELDS
return context
class TaskDetailView(LoginRequiredMixin, DetailView):
model = PyTask
template_name = 'base/detail.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskDetailView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'project:task', 'name': 'Tarea'}]
context['update_url'] = 'project:task-update'
context['delete_url'] = 'project:task-delete'
context['fields'] = TASK_FIELDS
return context
class TaskCreateView(LoginRequiredMixin, CreateView):
model = PyTask
fields = TASK_FIELDS_SHORT
template_name = 'base/form.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskCreateView, self).get_context_data(**kwargs)
context['title'] = 'Crear Tarea'
context['breadcrumbs'] = [{'url': 'project:task', 'name': 'Tarea'}]
context['back_url'] = reverse('project:task')
return context
class TaskUpdateView(LoginRequiredMixin, UpdateView):
model = PyTask
fields = TASK_FIELDS_SHORT
template_name = 'base/form.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskUpdateView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'project:task', 'name': 'Tarea'}]
context['back_url'] = reverse('project:task-detail', kwargs={'pk': context['object'].pk})
return context
@login_required(login_url="base:login")
def DeleteTask(self, pk):
task = PyTask.objects.get(id=pk)
task.delete()
return redirect(reverse('project:task'))
| # Django Library
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
# Localfolder Library
from ..models.task import PyTask
TASK_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Estado', 'field': 'state'},
{'string': 'Proyecto', 'field': 'project_id'},
{'string': 'Notas', 'field': 'note'},
]
TASK_FIELDS_SHORT = ['name','state','project_id','note']
class TaskListView(LoginRequiredMixin, ListView):
model = PyTask
template_name = 'base/list.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskListView, self).get_context_data(**kwargs)
context['title'] = 'Tareas'
context['detail_url'] = 'project:task-detail'
context['add_url'] = 'project:task-add'
context['fields'] = TASK_FIELDS
return context
class TaskDetailView(LoginRequiredMixin, DetailView):
model = PyTask
template_name = 'base/detail.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskDetailView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'project:task', 'name': 'Tarea'}]
context['update_url'] = 'project:task-update'
context['delete_url'] = 'project:task-delete'
context['fields'] = TASK_FIELDS
return context
class TaskCreateView(LoginRequiredMixin, CreateView):
model = PyTask
fields = TASK_FIELDS_SHORT
template_name = 'base/form.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskCreateView, self).get_context_data(**kwargs)
context['title'] = 'Crear Tarea'
context['breadcrumbs'] = [{'url': 'project:task', 'name': 'Tarea'}]
context['back_url'] = reverse('project:task')
return context
class TaskUpdateView(LoginRequiredMixin, UpdateView):
model = PyTask
fields = TASK_FIELDS_SHORT
template_name = 'base/form.html'
login_url = "login"
def get_context_data(self, **kwargs):
context = super(TaskUpdateView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'project:task', 'name': 'Tarea'}]
context['back_url'] = reverse('project:task-detail', kwargs={'pk': context['object'].pk})
return context
@login_required(login_url="base:login")
def DeleteTask(self, pk):
task = PyTask.objects.get(id=pk)
task.delete()
return redirect(reverse('project:task'))
| en | 0.606719 | # Django Library # Localfolder Library | 1.993126 | 2 |
python/tests/spatial_operator/test_point_join.py | andreicovaliov/incubator-sedona | 747 | 6630133 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import os
from sedona.core.enums import FileDataSplitter, GridType, IndexType
from sedona.core.enums.join_build_side import JoinBuildSide
from sedona.core.spatialOperator import JoinQuery
from sedona.core.spatialOperator.join_params import JoinParams
from tests.spatial_operator.test_join_base import TestJoinBase
from tests.tools import tests_resource
input_location = os.path.join(tests_resource, "arealm-small.csv")
input_location_query_window = os.path.join(tests_resource, "zcta510-small.csv")
offset = 1
splitter = FileDataSplitter.CSV
numPartitions = 11
distance = 0.01
query_polygon_set = os.path.join(tests_resource, "primaryroads-polygon.csv")
inputCount = 3000
inputBoundary = -173.120769, -84.965961, 30.244859, 71.355134
rectangle_match_count = 103
rectangle_match_with_original_duplicates_count = 103
polygon_match_count = 472
polygon_match_with_original_duplicates_count = 562
def pytest_generate_tests(metafunc):
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
metafunc.parametrize(
argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
)
parameters = [
dict(num_partitions=11, grid_type=GridType.QUADTREE),
dict(num_partitions=11, grid_type=GridType.QUADTREE),
dict(num_partitions=11, grid_type=GridType.KDBTREE),
]
class TestRectangleJoin(TestJoinBase):
params = {
"test_nested_loop_with_rectangles": parameters,
"test_nested_loop_with_polygons": parameters,
"test_index_int": parameters,
"test_rtree_with_rectangles": parameters,
"test_r_tree_with_polygons": parameters,
"test_quad_tree_with_rectangles": parameters,
"test_quad_tree_with_polygons": parameters,
"test_dynamic_r_tree_with_rectangles": parameters,
"test_dynamic_r_tree_with_polygons": parameters
}
def test_nested_loop_with_rectangles(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
self.nested_loop(query_rdd, num_partitions, grid_type, rectangle_match_count)
def test_nested_loop_with_polygons(self, num_partitions, grid_type):
query_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else polygon_match_count
self.nested_loop(query_rdd, num_partitions, grid_type, expected_count)
def nested_loop(self, query_rdd, num_partitions, grid_type, expected_count):
spatial_rdd = self.create_point_rdd(input_location, splitter, num_partitions)
self.partition_rdds(
query_rdd, spatial_rdd, grid_type)
result = JoinQuery.SpatialJoinQuery(
spatial_rdd, query_rdd, False, True).collect()
self.sanity_check_join_results(result)
assert expected_count == self.count_join_results(result)
def test_rtree_with_rectangles(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.RTREE, polygon_match_count
)
def test_r_tree_with_polygons(self, num_partitions, grid_type):
query_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else polygon_match_count
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.RTREE, expected_count
)
def test_quad_tree_with_rectangles(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.QUADTREE, polygon_match_count
)
def test_quad_tree_with_polygons(self, num_partitions, grid_type):
query_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else polygon_match_count
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.QUADTREE, expected_count
)
def index_int(self, query_rdd, num_partitions, grid_type, index_type, expected_count):
spatial_rdd = self.create_point_rdd(input_location, splitter, num_partitions)
self.partition_rdds(query_rdd, spatial_rdd, grid_type)
spatial_rdd.buildIndex(index_type, True)
result = JoinQuery.SpatialJoinQuery(
spatial_rdd, query_rdd, False, True).collect()
self.sanity_check_join_results(result)
        assert expected_count == self.count_join_results(result)
def test_dynamic_r_tree_with_rectangles(self, grid_type, num_partitions):
polygon_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
expected_count = rectangle_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) \
else rectangle_match_count
self.dynamic_rtree_int(polygon_rdd, num_partitions, grid_type, IndexType.RTREE, expected_count)
def test_dynamic_r_tree_with_polygons(self, grid_type, num_partitions):
polygon_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) \
else polygon_match_count
self.dynamic_rtree_int(polygon_rdd, num_partitions, grid_type, IndexType.RTREE, expected_count)
def dynamic_rtree_int(self, query_rdd, num_partitions, grid_type, index_type, expected_count):
spatial_rdd = self.create_point_rdd(input_location, splitter, num_partitions)
self.partition_rdds(query_rdd, spatial_rdd, grid_type)
join_params = JoinParams(True, True, index_type, JoinBuildSide.LEFT)
results = JoinQuery.spatialJoin(query_rdd, spatial_rdd, join_params).collect()
self.sanity_check_flat_join_results(results)
assert expected_count == results.__len__()
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import os
from sedona.core.enums import FileDataSplitter, GridType, IndexType
from sedona.core.enums.join_build_side import JoinBuildSide
from sedona.core.spatialOperator import JoinQuery
from sedona.core.spatialOperator.join_params import JoinParams
from tests.spatial_operator.test_join_base import TestJoinBase
from tests.tools import tests_resource
input_location = os.path.join(tests_resource, "arealm-small.csv")
input_location_query_window = os.path.join(tests_resource, "zcta510-small.csv")
offset = 1
splitter = FileDataSplitter.CSV
numPartitions = 11
distance = 0.01
query_polygon_set = os.path.join(tests_resource, "primaryroads-polygon.csv")
inputCount = 3000
inputBoundary = -173.120769, -84.965961, 30.244859, 71.355134
rectangle_match_count = 103
rectangle_match_with_original_duplicates_count = 103
polygon_match_count = 472
polygon_match_with_original_duplicates_count = 562
def pytest_generate_tests(metafunc):
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
metafunc.parametrize(
argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
)
parameters = [
dict(num_partitions=11, grid_type=GridType.QUADTREE),
dict(num_partitions=11, grid_type=GridType.QUADTREE),
dict(num_partitions=11, grid_type=GridType.KDBTREE),
]
class TestRectangleJoin(TestJoinBase):
params = {
"test_nested_loop_with_rectangles": parameters,
"test_nested_loop_with_polygons": parameters,
"test_index_int": parameters,
"test_rtree_with_rectangles": parameters,
"test_r_tree_with_polygons": parameters,
"test_quad_tree_with_rectangles": parameters,
"test_quad_tree_with_polygons": parameters,
"test_dynamic_r_tree_with_rectangles": parameters,
"test_dynamic_r_tree_with_polygons": parameters
}
def test_nested_loop_with_rectangles(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
self.nested_loop(query_rdd, num_partitions, grid_type, rectangle_match_count)
def test_nested_loop_with_polygons(self, num_partitions, grid_type):
query_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else polygon_match_count
self.nested_loop(query_rdd, num_partitions, grid_type, expected_count)
def nested_loop(self, query_rdd, num_partitions, grid_type, expected_count):
spatial_rdd = self.create_point_rdd(input_location, splitter, num_partitions)
self.partition_rdds(
query_rdd, spatial_rdd, grid_type)
result = JoinQuery.SpatialJoinQuery(
spatial_rdd, query_rdd, False, True).collect()
self.sanity_check_join_results(result)
assert expected_count == self.count_join_results(result)
def test_rtree_with_rectangles(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.RTREE, polygon_match_count
)
def test_r_tree_with_polygons(self, num_partitions, grid_type):
query_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else polygon_match_count
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.RTREE, expected_count
)
def test_quad_tree_with_rectangles(self, num_partitions, grid_type):
query_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.QUADTREE, polygon_match_count
)
def test_quad_tree_with_polygons(self, num_partitions, grid_type):
query_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) else polygon_match_count
self.index_int(
query_rdd, num_partitions, grid_type, IndexType.QUADTREE, expected_count
)
def index_int(self, query_rdd, num_partitions, grid_type, index_type, expected_count):
spatial_rdd = self.create_point_rdd(input_location, splitter, num_partitions)
self.partition_rdds(query_rdd, spatial_rdd, grid_type)
spatial_rdd.buildIndex(index_type, True)
result = JoinQuery.SpatialJoinQuery(
spatial_rdd, query_rdd, False, True).collect()
self.sanity_check_join_results(result)
        assert expected_count == self.count_join_results(result)
def test_dynamic_r_tree_with_rectangles(self, grid_type, num_partitions):
polygon_rdd = self.create_rectangle_rdd(input_location_query_window, splitter, num_partitions)
expected_count = rectangle_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) \
else rectangle_match_count
self.dynamic_rtree_int(polygon_rdd, num_partitions, grid_type, IndexType.RTREE, expected_count)
def test_dynamic_r_tree_with_polygons(self, grid_type, num_partitions):
polygon_rdd = self.create_polygon_rdd(query_polygon_set, splitter, num_partitions)
expected_count = polygon_match_with_original_duplicates_count if self.expect_to_preserve_original_duplicates(
grid_type) \
else polygon_match_count
self.dynamic_rtree_int(polygon_rdd, num_partitions, grid_type, IndexType.RTREE, expected_count)
def dynamic_rtree_int(self, query_rdd, num_partitions, grid_type, index_type, expected_count):
spatial_rdd = self.create_point_rdd(input_location, splitter, num_partitions)
self.partition_rdds(query_rdd, spatial_rdd, grid_type)
join_params = JoinParams(True, True, index_type, JoinBuildSide.LEFT)
results = JoinQuery.spatialJoin(query_rdd, spatial_rdd, join_params).collect()
self.sanity_check_flat_join_results(results)
assert expected_count == results.__len__()
| en | 0.865663 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. | 1.631222 | 2 |
p1_basic/day16_21module/day19/03_os精讲.py | dong-pro/fullStackPython | 1 | 6630134 | # os.system("bash command") runs a shell command and prints its output directly
# os.popen("bash command").read() runs a shell command and returns its output
# os.getcwd() returns the current working directory, i.e. the directory this Python script is running in
# os.chdir("dirname") changes the current working directory; the equivalent of cd in a shell
import os
# Getting the size of a file
# os.path.getsize('path') # the Python command
# dir path \C # the operating-system command
# Listing all files and folders under the current path
# os.system('dir path') # run the operating-system command directly from Python
# os.listdir('path') # use the method provided by Python's os module, which calls the OS command indirectly
# People who learn Python:
# web development
# ops development: solid ops fundamentals, familiar with operating-system commands
# exec('Python code as a string')
# eval('run Python code given as a string')
# os.system('run an operating-system command given as a string')
# os.popen('run an operating-system command given as a string and return the result')
# getcwd # get the directory the command is currently executed from
# chdir # change the directory the command is executed from
# ret = os.listdir('D:\sylar\s15')
# print(ret)
# os.chdir('D:\sylar\s15')
# print(os.popen('dir').read())
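# A small runnable sketch (added for illustration) of the difference between the two styles:
# the os module method hands back a parsed Python list, while os.popen() hands back raw text you must split yourself.
cwd = os.getcwd()
parsed = os.listdir(cwd)                                   # already a list of names
raw = os.popen('dir' if os.name == 'nt' else 'ls').read()  # one big string of command output
print(len(parsed), len(raw.split()))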
# What the os module does:
# it defines many methods that indirectly call operating-system commands for you and fetch the result,
# then it parses and reshapes that result into the data types we need.
# You can also call OS commands directly with os.popen/os.system and get the result,
# but then the parsing and reshaping work is yours to do.
# Whenever the os module's own methods can do the job, just use those ready-made methods;
# if one day you find that the os module's built-in functionality cannot solve your problem,
# and an OS command happens to solve it nicely, that is the time to use os.popen/os.system | # os.system("bash command") runs a shell command and prints its output directly
# os.popen("bash command).read() 运行shell命令,获取执行结果
# os.getcwd() 获取当前工作目录,即当前python脚本工作的目录路径
# os.chdir("dirname") 改变当前脚本工作目录;相当于shell下cd
import os
# 统计文件的大小
# os.path.getsize('路径') # python的命令
# dir 路径 \C # 操作系统的命令
# 帮助你显示当前路径下的所有文件和文件夹
# os.system('dir 路径') # 使用python语言直接执行操作系统的命令
# os.listdir('路径') # 使用python语言的os模块提供的方法 间接调用了操作系统命令
# 学习python的人
# web开发
# 运维开发 : 运维功底 熟悉操作系统命令
# exec('字符串数据类型的python代码')
# eval('执行字符串数据类型的python代码')
# os.system('执行字符串数据类型的操作系统命令')
# os.popen('执行字符串数据类型的操作系统命令,并返回结果')
# getcwd # 获取当前执行命令的时候所在的目录
# chdir # 修改当前执行命令的时候所在的目录
# ret = os.listdir('D:\sylar\s15')
# print(ret)
# os.chdir('D:\sylar\s15')
# print(os.popen('dir').read())
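# A small runnable sketch (added for illustration) of the difference between the two styles:
# the os module method hands back a parsed Python list, while os.popen() hands back raw text you must split yourself.
cwd = os.getcwd()
parsed = os.listdir(cwd)                                   # already a list of names
raw = os.popen('dir' if os.name == 'nt' else 'ls').read()  # one big string of command output
print(len(parsed), len(raw.split()))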
# What the os module does:
# it defines many methods that indirectly call operating-system commands for you and fetch the result,
# then it parses and reshapes that result into the data types we need.
# You can also call OS commands directly with os.popen/os.system and get the result,
# but then the parsing and reshaping work is yours to do.
# Whenever the os module's own methods can do the job, just use those ready-made methods;
# if one day you find that the os module's built-in functionality cannot solve your problem,
# and an OS command happens to solve it nicely, that is the time to use os.popen/os.system | zh | 0.938121 | # os.system("bash command") runs a shell command and prints its output directly # os.popen("bash command").read() runs a shell command and returns its output # os.getcwd() returns the current working directory, i.e. the directory this Python script is running in # os.chdir("dirname") changes the current working directory; the equivalent of cd in a shell # Getting the size of a file # os.path.getsize('path') # the Python command # dir path \C # the operating-system command # Listing all files and folders under the current path # os.system('dir path') # run the operating-system command directly from Python # os.listdir('path') # use the method provided by Python's os module, which calls the OS command indirectly # People who learn Python: # web development # ops development: solid ops fundamentals, familiar with operating-system commands # exec('Python code as a string') # eval('run Python code given as a string') # os.system('run an operating-system command given as a string') # os.popen('run an operating-system command given as a string and return the result') # getcwd # get the directory the command is currently executed from # chdir # change the directory the command is executed from # ret = os.listdir('D:\sylar\s15') # print(ret) # os.chdir('D:\sylar\s15') # print(os.popen('dir').read()) # What the os module does: # it defines many methods that indirectly call operating-system commands for you and fetch the result, # then it parses and reshapes that result into the data types we need. # You can also call OS commands directly with os.popen/os.system and get the result, # but then the parsing and reshaping work is yours to do. # Whenever the os module's own methods can do the job, just use those ready-made methods; # if one day you find that the os module's built-in functionality cannot solve your problem, # and an OS command happens to solve it nicely, that is the time to use os.popen/os.system | 2.916439 | 3
catkin_ws/src/00-infrastructure/easy_algo/include/easy_algo/formatting.py | yxiao1996/dev | 2 | 6630135 | from duckietown_utils import (friendly_path, indent, make_row_red, make_red,
remove_table_field, format_table_plus, yaml_dump_pretty)
from .algo_db import EasyAlgoFamily
def format_db(db, colorize=True, verbose=False):
families = list(db.family_name2config.values())
s = format_families(families, colorize, verbose=verbose)
return s
def format_families(families, colorize=True, verbose=True):
if not families:
s = "No algorithm families found."
return s
else:
table = []
table.append(['Family name',
'interface',
'pattern',
'# found',
'valid',
'filename',
'description',
])
for family in families:
assert isinstance(family, EasyAlgoFamily)
row = []
row.append(family.family_name)
row.append(family.interface)
row.append(family.instances_pattern)
if not family.instances:
row.append('\n(none)')
else:
n_valid = len([_ for _ in family.instances.values() if _.valid])
n_invalid = len(family.instances) - n_valid
ss = '%s' % len(family.instances)
if n_invalid:
ss += make_red(' (%d invalid)' % n_invalid)
row.append(ss)
if family.valid:
ss = 'yes'
else:
ss = 'no: ' + family.error_if_invalid
row.append(ss)
row.append(friendly_path(family.filename))
if (not family.valid) and colorize:
row = make_row_red(row)
row.append(family.description.strip())
table.append(row)
if not verbose:
remove_table_field(table, 'filename')
s = "Found %d algorithm families:\n\n" % len(families)
s += indent(format_table_plus(table, colspacing=4), ' ')
return s
def format_instances(family, colorize, verbose=False):
if not family.instances:
s = ('No instances files found for family "%s" (pattern = %s).\n\n' %
(family.family_name, family.instances_pattern))
return s
else:
s = ('Found %d instances of algorithm family "%s":\n' %
(len(family.instances), family.family_name))
table = []
table.append(['Instance name', 'constructor',
'parameters', 'description', 'filename'])
for _ in family.instances.values():
row = []
name = _.instance_name
if (not _.valid) and colorize:
name = make_red(name)
row.append(name)
row.append(_.constructor)
row.append(yaml_dump_pretty(_.parameters))
row.append(_.description)
row.append(friendly_path(_.filename))
table.append(row)
if not verbose:
remove_table_field(table, 'filename')
remove_table_field(table, 'description')
s += indent(format_table_plus(table, colspacing=4), '| ')
for _ in family.instances.values():
if not _.valid:
msg = _.error_if_invalid
s += make_red('\n' + indent(msg, '', _.instance_name + ': '))
return s
| from duckietown_utils import (friendly_path, indent, make_row_red, make_red,
remove_table_field, format_table_plus, yaml_dump_pretty)
from .algo_db import EasyAlgoFamily
def format_db(db, colorize=True, verbose=False):
families = list(db.family_name2config.values())
s = format_families(families, colorize, verbose=verbose)
return s
def format_families(families, colorize=True, verbose=True):
if not families:
s = "No algorithm families found."
return s
else:
table = []
table.append(['Family name',
'interface',
'pattern',
'# found',
'valid',
'filename',
'description',
])
for family in families:
assert isinstance(family, EasyAlgoFamily)
row = []
row.append(family.family_name)
row.append(family.interface)
row.append(family.instances_pattern)
if not family.instances:
row.append('\n(none)')
else:
n_valid = len([_ for _ in family.instances.values() if _.valid])
n_invalid = len(family.instances) - n_valid
ss = '%s' % len(family.instances)
if n_invalid:
ss += make_red(' (%d invalid)' % n_invalid)
row.append(ss)
if family.valid:
ss = 'yes'
else:
ss = 'no: ' + family.error_if_invalid
row.append(ss)
row.append(friendly_path(family.filename))
if (not family.valid) and colorize:
row = make_row_red(row)
row.append(family.description.strip())
table.append(row)
if not verbose:
remove_table_field(table, 'filename')
s = "Found %d algorithm families:\n\n" % len(families)
s += indent(format_table_plus(table, colspacing=4), ' ')
return s
def format_instances(family, colorize, verbose=False):
if not family.instances:
s = ('No instances files found for family "%s" (pattern = %s).\n\n' %
(family.family_name, family.instances_pattern))
return s
else:
s = ('Found %d instances of algorithm family "%s":\n' %
(len(family.instances), family.family_name))
table = []
table.append(['Instance name', 'constructor',
'parameters', 'description', 'filename'])
for _ in family.instances.values():
row = []
name = _.instance_name
if (not _.valid) and colorize:
name = make_red(name)
row.append(name)
row.append(_.constructor)
row.append(yaml_dump_pretty(_.parameters))
row.append(_.description)
row.append(friendly_path(_.filename))
table.append(row)
if not verbose:
remove_table_field(table, 'filename')
remove_table_field(table, 'description')
s += indent(format_table_plus(table, colspacing=4), '| ')
for _ in family.instances.values():
if not _.valid:
msg = _.error_if_invalid
s += make_red('\n' + indent(msg, '', _.instance_name + ': '))
return s
| none | 1 | 2.428389 | 2 |
|
KGEkeras/utils.py | Erik-BM/KGE-Keras | 2 | 6630136 |
import numpy as np
from tqdm import tqdm
from scipy.stats import rankdata
from random import choice
from collections import defaultdict
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.losses import binary_crossentropy
import tensorflow as tf
from random import choices
EPSILON = 1e-6
from rdflib import Graph, URIRef, Literal, Namespace
import rdflib
from rdflib.namespace import XSD, RDF
UNIT = Namespace('http://qudt.org/vocab/unit#')
from tqdm import tqdm
import spacy
VEC_SIZE = 300
def isint(value):
try:
int(value)
return True
except ValueError:
return False
class LiteralConverter:
def __init__(self,g,padding_value=0):
self.g = g
self.non_literal_entities = set(g.subjects()) | set([o for o in g.objects() if isinstance(o,URIRef)])
self.literal_predicates = set([p for p,o in g.predicate_objects() if isinstance(o,Literal)])
self.padding_value = padding_value
self.lang_models = {'xx':spacy.load('xx_ent_wiki_sm'),'en':spacy.load('en_core_web_md')}
def _process_string_literal(self,x):
doc = self.lang_models['en'](str(x))
v = doc.vector
if len(v) < 1:
v = self.padding_value*np.ones((VEC_SIZE,))
return v
def _process_literal(self,x):
if hasattr(x,'datatype') and (x.datatype == XSD['float'] or x.datatype == XSD['double']):
return [float(x)]
if hasattr(x,'datatype') and x.datatype == XSD['date']:
return URIRef('http://examples.org/date/%s' % str(x))
if hasattr(x,'datatype') and x.datatype == XSD['boolean']:
return [1] if bool(x) else [0]
if len(str(x)) == 4 and isint(x):
return URIRef('http://examples.org/date/%s' % str(x))
if hasattr(x,'datatype') and (x.datatype is None or x.datatype == XSD['string']):
return self._process_string_literal(x)
return None
def fit(self):
out = defaultdict(dict)
vec_or_num = {}
array_ps = set()
for i,e in tqdm(enumerate(self.non_literal_entities),total=len(self.non_literal_entities),desc='Processing literals'):
for j,p in enumerate(self.literal_predicates):
tmp = set(self.g.objects(subject = e, predicate = p / RDF.value)) | set(self.g.objects(subject = e, predicate = p))
unit = set(self.g.objects(subject = e, predicate = p / UNIT.units))
for t in tmp:
t = self._process_literal(t)
if t is None:
continue
elif isinstance(t,URIRef):
self.g.add((e,p,t))
else:
out[p][e] = t
if p not in vec_or_num: vec_or_num[p] = len(t)
s=sum(i for k,i in vec_or_num.items())
self.literals = {}
for e in self.non_literal_entities:
tmp = []
for p in self.literal_predicates:
if not p in vec_or_num: continue
if e in out[p]:
tmp.append(np.asarray(out[p][e]).reshape((1,-1)))
else:
tmp.append(self.padding_value*np.ones((1,vec_or_num[p])))
tmp = np.concatenate(tmp,axis=1).reshape((-1,))
assert len(tmp) == s
self.literals[e] = tmp
def transform(self,entities):
return np.asarray([self.literals[e] for e in entities])
def fit_transform(self,entities):
if not hasattr(self,'literals'):
self.fit()
return self.transform(entities)
def load_kg(path):
out = []
with open(path,'r') as f:
for l in f:
l = l.strip().split()
out.append(l)
return out
def generate_negative(kg, N, negative=2, check_kg=False, corrupt_head=True, corrupt_tail=True):
# false triples:
assert corrupt_head or corrupt_tail
R = np.repeat(np.asarray([p for _,p,_ in kg]).reshape((-1,1)),negative,axis=0)
fs = np.random.randint(0,N,size=(negative*len(kg),1))
fo = np.random.randint(0,N,size=(negative*len(kg),1))
negative_kg = np.stack([fs,R,fo],axis=1)
return negative_kg
def oversample_data(kgs,x=None,y=None,testing=False):
if testing:
kgs = [list(kg)[:len(y)] for kg in kgs]
else:
kgs = [list(kg) for kg in kgs]
if y is not None:
m = max(max(map(len,kgs)),len(y))
else:
m = max(map(len,kgs))
out = []
for kg in kgs:
out.append(choices(kg, k=m))
if x is not None and y is not None:
        k = int(np.ceil(m / len(y)))  # np.repeat needs an integer repeat count
y = np.repeat(y,k,axis=0)[:m]
x = np.repeat(x,k,axis=0)[:m,:]
for s in np.split(x,3,axis=1):
out.append(s.reshape((-1,)))
return [np.squeeze(np.asarray(o)) for o in out], np.asarray(y)
else:
return [np.squeeze(np.asarray(o)) for o in out]
def pad(kg,bs):
kg = list(kg)
while len(kg) % bs != 0:
kg.append(choice(kg))
return np.asarray(kg)
def mrr(target, scores):
scores = sorted(scores, key=lambda x: x[1], reverse=True)
labels = [x for x,_ in scores]
return 1/(1+labels.index(target))
def hits(target, scores, k=10):
scores = sorted(scores, key=lambda x: x[1], reverse=True)
labels = [x for x,_ in scores][:k]
return int(target in labels)
def gen_tail_data(test_data,num_entities,bs,filter_t):
for s,p,o in test_data:
candiate_objects = list(range(num_entities))
candiate_objects.remove(o)
for oi in filter_t[(s,p)]:
candiate_objects.remove(oi)
subjects = np.asarray([[int(s)]]*(len(candiate_objects)+1))
predicates = np.asarray([[int(p)]]*(len(candiate_objects)+1))
objects = np.asarray([[int(o)]] + [[ent_id] for ent_id in candiate_objects])
triples = np.concatenate((subjects,predicates,objects),axis=-1)
yield triples.reshape((-1,3))
def gen_head_data(test_data,num_entities,bs,filter_h):
for s,p,o in test_data:
candiate_subjects = list(range(num_entities))
candiate_subjects.remove(s)
for si in filter_h[(p,o)]:
candiate_subjects.remove(si)
objects = np.asarray([[int(o)]]*(len(candiate_subjects)+1))
predicates = np.asarray([[int(p)]]*(len(candiate_subjects)+1))
subjects = np.asarray([[int(s)]] + [[ent_id] for ent_id in candiate_subjects])
triples = np.concatenate((subjects,predicates,objects),axis=-1)
yield triples.reshape((-1,3))
def validate(model, test_data, num_entities, bs, filtering_triples = None):
filter_h = defaultdict(set)
filter_t = defaultdict(set)
for s,p,o in filtering_triples:
filter_h[(p,o)].add(s)
filter_t[(s,p)].add(o)
c_1, c_3, c_10 = 0,0,0
mean_ranks = []
for t in tqdm(gen_tail_data(test_data,num_entities,bs,filter_t),total=len(test_data)):
res = np.asarray(model.predict(t)).reshape((-1,))
r = rankdata(res,'max')
target_rank = r[0]
num_candidate = len(res)
real_rank = num_candidate - target_rank + 1
c_1 += 1 if target_rank == num_candidate else 0
c_3 += 1 if target_rank + 3 > num_candidate else 0
c_10 += 1 if target_rank + 10 > num_candidate else 0
mean_ranks.append(real_rank)
tail_hit_at_1 = c_1 / float(len(test_data))
tail_hit_at_3 = c_3 / float(len(test_data))
tail_hit_at_10 = c_10 / float(len(test_data))
tail_avg_rank = np.mean(mean_ranks)
tail_mrr = np.mean([1/m for m in mean_ranks])
c_1, c_3, c_10 = 0,0,0
mean_ranks = []
for t in tqdm(gen_head_data(test_data,num_entities,bs,filter_h),total=len(test_data)):
res = np.asarray(model.predict(t)).reshape((-1,))
r = rankdata(res,'max')
target_rank = r[0]
num_candidate = len(res)
real_rank = num_candidate - target_rank + 1
c_1 += 1 if target_rank == num_candidate else 0
c_3 += 1 if target_rank + 3 > num_candidate else 0
c_10 += 1 if target_rank + 10 > num_candidate else 0
mean_ranks.append(real_rank)
head_hit_at_1 = c_1 / float(len(test_data))
head_hit_at_3 = c_3 / float(len(test_data))
head_hit_at_10 = c_10 / float(len(test_data))
head_avg_rank = np.mean(mean_ranks)
head_mrr = np.mean([1/m for m in mean_ranks])
metrics = {'tail_hits@1':tail_hit_at_1,
'tail_hits@3':tail_hit_at_3,
'tail_hits@10':tail_hit_at_10,
'tail_mr':tail_avg_rank,
'tail_mrr':tail_mrr,
'head_hits@1':head_hit_at_1,
'head_hits@3':head_hit_at_3,
'head_hits@10':head_hit_at_10,
'head_mr':head_avg_rank,
'head_mrr':head_mrr,
'hits@1':(tail_hit_at_1+head_hit_at_1)/2,
'hits@3':(tail_hit_at_3+head_hit_at_3)/2,
'hits@10':(tail_hit_at_10+head_hit_at_10)/2,
'mr':(tail_avg_rank+head_avg_rank)/2,
'mrr':(tail_mrr+head_mrr)/2,
}
return metrics
class KGEValidateCallback(Callback):
def __init__(self, validation_data, train_data=None, *args, **kwargs):
        super(KGEValidateCallback, self).__init__(*args, **kwargs)
self.validation_data = validation_data
self.train_data = train_data
def on_epoch_end(self, epoch, logs = None):
if epoch % 5 == 0:
logs = logs or {}
            tmp = validate(self.model,
                           self.validation_data,
                           self.model.num_entities,
                           bs=None,  # bs is not used by validate's candidate generators
                           filtering_triples=self.train_data)
for k in tmp:
logs['val_'+k] = tmp[k]
def on_train_end(self, logs=None):
self.on_epoch_end(100,logs=logs)
def pointwize_hinge(true,false,margin=1,negative_samples=1, reduce_mean = True):
return tf.reduce_mean(tf.nn.relu(margin-true))+tf.reduce_mean(tf.nn.relu(margin+false))
def pointwize_logistic(true,false,margin=1,negative_samples=1, reduce_mean = True):
return tf.reduce_mean(tf.math.log(EPSILON+1+tf.math.exp(-true)))+tf.reduce_mean(tf.math.log(EPSILON+1+tf.math.exp(false)))
def pointwize_square_loss(true,false,margin=1,negative_samples=1, reduce_mean = True):
return tf.reduce_mean(tf.square(margin-true))+tf.reduce_mean(tf.square(margin+false))
def pointwize_cross_entropy(true,false,margin=1,negative_samples=1, reduce_mean = True):
return binary_crossentropy(1,true)+binary_crossentropy(0,false)
def pairwize_hinge(true,false,margin=1, negative_samples=1, reduce_mean = True):
false = tf.reshape(false,(-1,negative_samples))
tmp = tf.nn.relu(margin+false-true)
if reduce_mean:
return tf.reduce_mean(tmp)
return tmp
def pairwize_logistic(true,false,margin=0, negative_samples=1, reduce_mean = True):
false = tf.reshape(false,(-1,negative_samples))
tmp = tf.math.log(EPSILON+1+tf.math.exp(false-true))
if reduce_mean:
return tf.reduce_mean(tmp)
return tmp
def pairwize_square_loss(true,false,margin=0, negative_samples=1, reduce_mean = True):
false = tf.reshape(false,(-1,negative_samples))
tmp = - tf.square(false-true)
if reduce_mean:
return tf.reduce_mean(tmp)
return tmp
def loss_function_lookup(name):
return {
'pointwize_hinge':pointwize_hinge,
'pointwize_logistic':pointwize_logistic,
'pointwize_cross_entropy':pointwize_cross_entropy,
'pointwize_square_loss':pointwize_square_loss,
'pairwize_hinge':pairwize_hinge,
'pairwize_logistic':pairwize_logistic,
'pairwize_square_loss':pairwize_square_loss
}[name]
|
import numpy as np
from tqdm import tqdm
from scipy.stats import rankdata
from random import choice
from collections import defaultdict
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.losses import binary_crossentropy
import tensorflow as tf
from random import choices
EPSILON = 1e-6
from rdflib import Graph, URIRef, Literal, Namespace
import rdflib
from rdflib.namespace import XSD, RDF
UNIT = Namespace('http://qudt.org/vocab/unit#')
from tqdm import tqdm
import spacy
VEC_SIZE = 300
def isint(value):
try:
int(value)
return True
except ValueError:
return False
class LiteralConverter:
def __init__(self,g,padding_value=0):
self.g = g
self.non_literal_entities = set(g.subjects()) | set([o for o in g.objects() if isinstance(o,URIRef)])
self.literal_predicates = set([p for p,o in g.predicate_objects() if isinstance(o,Literal)])
self.padding_value = padding_value
self.lang_models = {'xx':spacy.load('xx_ent_wiki_sm'),'en':spacy.load('en_core_web_md')}
def _process_string_literal(self,x):
doc = self.lang_models['en'](str(x))
v = doc.vector
if len(v) < 1:
v = self.padding_value*np.ones((VEC_SIZE,))
return v
def _process_literal(self,x):
if hasattr(x,'datatype') and (x.datatype == XSD['float'] or x.datatype == XSD['double']):
return [float(x)]
if hasattr(x,'datatype') and x.datatype == XSD['date']:
return URIRef('http://examples.org/date/%s' % str(x))
if hasattr(x,'datatype') and x.datatype == XSD['boolean']:
return [1] if bool(x) else [0]
if len(str(x)) == 4 and isint(x):
return URIRef('http://examples.org/date/%s' % str(x))
if hasattr(x,'datatype') and (x.datatype is None or x.datatype == XSD['string']):
return self._process_string_literal(x)
return None
def fit(self):
out = defaultdict(dict)
vec_or_num = {}
array_ps = set()
for i,e in tqdm(enumerate(self.non_literal_entities),total=len(self.non_literal_entities),desc='Processing literals'):
for j,p in enumerate(self.literal_predicates):
tmp = set(self.g.objects(subject = e, predicate = p / RDF.value)) | set(self.g.objects(subject = e, predicate = p))
unit = set(self.g.objects(subject = e, predicate = p / UNIT.units))
for t in tmp:
t = self._process_literal(t)
if t is None:
continue
elif isinstance(t,URIRef):
self.g.add((e,p,t))
else:
out[p][e] = t
if p not in vec_or_num: vec_or_num[p] = len(t)
s=sum(i for k,i in vec_or_num.items())
self.literals = {}
for e in self.non_literal_entities:
tmp = []
for p in self.literal_predicates:
if not p in vec_or_num: continue
if e in out[p]:
tmp.append(np.asarray(out[p][e]).reshape((1,-1)))
else:
tmp.append(self.padding_value*np.ones((1,vec_or_num[p])))
tmp = np.concatenate(tmp,axis=1).reshape((-1,))
assert len(tmp) == s
self.literals[e] = tmp
def transform(self,entities):
return np.asarray([self.literals[e] for e in entities])
def fit_transform(self,entities):
if not hasattr(self,'literals'):
self.fit()
return self.transform(entities)
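# read a whitespace-separated triple file (one 's p o' per line) into a list of triples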
def load_kg(path):
out = []
with open(path,'r') as f:
for l in f:
l = l.strip().split()
out.append(l)
return out
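# uniform negative sampling: for every positive triple draw `negative` corrupted triples with random head and tail ids while keeping the relation (the check_kg/corrupt_* flags are accepted but not enforced here)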
def generate_negative(kg, N, negative=2, check_kg=False, corrupt_head=True, corrupt_tail=True):
# false triples:
assert corrupt_head or corrupt_tail
R = np.repeat(np.asarray([p for _,p,_ in kg]).reshape((-1,1)),negative,axis=0)
fs = np.random.randint(0,N,size=(negative*len(kg),1))
fo = np.random.randint(0,N,size=(negative*len(kg),1))
negative_kg = np.stack([fs,R,fo],axis=1)
return negative_kg
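# oversample all knowledge graphs (and optionally the feature/label arrays) with replacement to a common length, then split x into its three id columns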
def oversample_data(kgs,x=None,y=None,testing=False):
if testing:
kgs = [list(kg)[:len(y)] for kg in kgs]
else:
kgs = [list(kg) for kg in kgs]
if y is not None:
m = max(max(map(len,kgs)),len(y))
else:
m = max(map(len,kgs))
out = []
for kg in kgs:
out.append(choices(kg, k=m))
if x is not None and y is not None:
k = int(np.ceil(m/len(y))) # np.repeat requires an integer repeat count
y = np.repeat(y,k,axis=0)[:m]
x = np.repeat(x,k,axis=0)[:m,:]
for s in np.split(x,3,axis=1):
out.append(s.reshape((-1,)))
return [np.squeeze(np.asarray(o)) for o in out], np.asarray(y)
else:
return [np.squeeze(np.asarray(o)) for o in out]
def pad(kg,bs):
kg = list(kg)
while len(kg) % bs != 0:
kg.append(choice(kg))
return np.asarray(kg)
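# ranking metrics over (label, score) pairs: mrr returns the reciprocal rank of the target, hits returns 1 if the target appears in the top k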
def mrr(target, scores):
scores = sorted(scores, key=lambda x: x[1], reverse=True)
labels = [x for x,_ in scores]
return 1/(1+labels.index(target))
def hits(target, scores, k=10):
scores = sorted(scores, key=lambda x: x[1], reverse=True)
labels = [x for x,_ in scores][:k]
return int(target in labels)
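# candidate generators for filtered link prediction: yield the true triple first, followed by all corrupted candidates not excluded by the filter sets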
def gen_tail_data(test_data,num_entities,bs,filter_t):
for s,p,o in test_data:
candiate_objects = list(range(num_entities))
candiate_objects.remove(o)
for oi in filter_t[(s,p)]:
candiate_objects.remove(oi)
subjects = np.asarray([[int(s)]]*(len(candiate_objects)+1))
predicates = np.asarray([[int(p)]]*(len(candiate_objects)+1))
objects = np.asarray([[int(o)]] + [[ent_id] for ent_id in candiate_objects])
triples = np.concatenate((subjects,predicates,objects),axis=-1)
yield triples.reshape((-1,3))
def gen_head_data(test_data,num_entities,bs,filter_h):
for s,p,o in test_data:
candiate_subjects = list(range(num_entities))
candiate_subjects.remove(s)
for si in filter_h[(p,o)]:
candiate_subjects.remove(si)
objects = np.asarray([[int(o)]]*(len(candiate_subjects)+1))
predicates = np.asarray([[int(p)]]*(len(candiate_subjects)+1))
subjects = np.asarray([[int(s)]] + [[ent_id] for ent_id in candiate_subjects])
triples = np.concatenate((subjects,predicates,objects),axis=-1)
yield triples.reshape((-1,3))
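# filtered ranking evaluation: rank each test triple against its tail/head corruptions and report hits@1/3/10, mean rank and MRR per direction plus their averages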
def validate(model, test_data, num_entities, bs, filtering_triples = None):
filter_h = defaultdict(set)
filter_t = defaultdict(set)
for s,p,o in filtering_triples:
filter_h[(p,o)].add(s)
filter_t[(s,p)].add(o)
c_1, c_3, c_10 = 0,0,0
mean_ranks = []
for t in tqdm(gen_tail_data(test_data,num_entities,bs,filter_t),total=len(test_data)):
res = np.asarray(model.predict(t)).reshape((-1,))
r = rankdata(res,'max')
target_rank = r[0]
num_candidate = len(res)
real_rank = num_candidate - target_rank + 1
c_1 += 1 if target_rank == num_candidate else 0
c_3 += 1 if target_rank + 3 > num_candidate else 0
c_10 += 1 if target_rank + 10 > num_candidate else 0
mean_ranks.append(real_rank)
tail_hit_at_1 = c_1 / float(len(test_data))
tail_hit_at_3 = c_3 / float(len(test_data))
tail_hit_at_10 = c_10 / float(len(test_data))
tail_avg_rank = np.mean(mean_ranks)
tail_mrr = np.mean([1/m for m in mean_ranks])
c_1, c_3, c_10 = 0,0,0
mean_ranks = []
for t in tqdm(gen_head_data(test_data,num_entities,bs,filter_h),total=len(test_data)):
res = np.asarray(model.predict(t)).reshape((-1,))
r = rankdata(res,'max')
target_rank = r[0]
num_candidate = len(res)
real_rank = num_candidate - target_rank + 1
c_1 += 1 if target_rank == num_candidate else 0
c_3 += 1 if target_rank + 3 > num_candidate else 0
c_10 += 1 if target_rank + 10 > num_candidate else 0
mean_ranks.append(real_rank)
head_hit_at_1 = c_1 / float(len(test_data))
head_hit_at_3 = c_3 / float(len(test_data))
head_hit_at_10 = c_10 / float(len(test_data))
head_avg_rank = np.mean(mean_ranks)
head_mrr = np.mean([1/m for m in mean_ranks])
metrics = {'tail_hits@1':tail_hit_at_1,
'tail_hits@3':tail_hit_at_3,
'tail_hits@10':tail_hit_at_10,
'tail_mr':tail_avg_rank,
'tail_mrr':tail_mrr,
'head_hits@1':head_hit_at_1,
'head_hits@3':head_hit_at_3,
'head_hits@10':head_hit_at_10,
'head_mr':head_avg_rank,
'head_mrr':head_mrr,
'hits@1':(tail_hit_at_1+head_hit_at_1)/2,
'hits@3':(tail_hit_at_3+head_hit_at_3)/2,
'hits@10':(tail_hit_at_10+head_hit_at_10)/2,
'mr':(tail_avg_rank+head_avg_rank)/2,
'mrr':(tail_mrr+head_mrr)/2,
}
return metrics
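# Keras callback: every 5th epoch (and once more when training ends) run the ranking evaluation and copy the metrics into the logs with a 'val_' prefix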
class KGEValidateCallback(Callback):
def __init__(self, validation_data, train_data=None, *args, **kwargs):
super().__init__(*args, **kwargs) # call Callback's own __init__; super(Callback, self) would skip it
self.validation_data = validation_data
self.train_data = train_data
def on_epoch_end(self, epoch, logs = None):
if epoch % 5 == 0:
logs = logs or {}
tmp = validate(self.model,
self.validation_data,
self.model.num_entities,
bs=None, # batch size is unused by validate's candidate generators
filtering_triples=self.train_data or []) # was passed positionally and landed in bs
for k in tmp:
logs['val_'+k] = tmp[k]
def on_train_end(self, logs=None):
self.on_epoch_end(100,logs=logs)
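# loss functions: pointwise losses score positive and negative triples independently; pairwise losses reshape the negatives to (batch, negative_samples) and compare each positive score against its own negatives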
def pointwize_hinge(true,false,margin=1,negative_samples=1, reduce_mean = True):
return tf.reduce_mean(tf.nn.relu(margin-true))+tf.reduce_mean(tf.nn.relu(margin+false))
def pointwize_logistic(true,false,margin=1,negative_samples=1, reduce_mean = True):
return tf.reduce_mean(tf.math.log(EPSILON+1+tf.math.exp(-true)))+tf.reduce_mean(tf.math.log(EPSILON+1+tf.math.exp(false)))
def pointwize_square_loss(true,false,margin=1,negative_samples=1, reduce_mean = True):
return tf.reduce_mean(tf.square(margin-true))+tf.reduce_mean(tf.square(margin+false))
def pointwize_cross_entropy(true,false,margin=1,negative_samples=1, reduce_mean = True):
return binary_crossentropy(1,true)+binary_crossentropy(0,false)
def pairwize_hinge(true,false,margin=1, negative_samples=1, reduce_mean = True):
false = tf.reshape(false,(-1,negative_samples))
tmp = tf.nn.relu(margin+false-true)
if reduce_mean:
return tf.reduce_mean(tmp)
return tmp
def pairwize_logistic(true,false,margin=0, negative_samples=1, reduce_mean = True):
false = tf.reshape(false,(-1,negative_samples))
tmp = tf.math.log(EPSILON+1+tf.math.exp(false-true))
if reduce_mean:
return tf.reduce_mean(tmp)
return tmp
def pairwize_square_loss(true,false,margin=0, negative_samples=1, reduce_mean = True):
false = tf.reshape(false,(-1,negative_samples))
tmp = - tf.square(false-true)
if reduce_mean:
return tf.reduce_mean(tmp)
return tmp
def loss_function_lookup(name):
return {
'pointwize_hinge':pointwize_hinge,
'pointwize_logistic':pointwize_logistic,
'pointwize_cross_entropy':pointwize_cross_entropy,
'pointwize_square_loss':pointwize_square_loss,
'pairwize_hinge':pairwize_hinge,
'pairwize_logistic':pairwize_logistic,
'pairwize_square_loss':pairwize_square_loss
}[name]
| es | 0.410895 | #') # false triples: | 2.048696 | 2 |
services/backend/thiamsu/management/commands/switch.py | LKKTGB/thiamsu | 10 | 6630137 | from django.core.management.base import BaseCommand
from django.db import models
from thiamsu.models.translation import Translation
class Command(BaseCommand):
help = "Load ptt data from csv file"
def add_arguments(self, parser):
parser.add_argument(
"--user-id", default=None, help="Target user to switch translations"
)
parser.add_argument(
"--song-id", default=None, help="Target song to switch translations"
)
parser.add_argument("--dryrun", action="store_true")
def get_latest_translations(self, lang, user_id=None, song_id=None):
"""
Return map {(song_id, line_no): latest_translation} of given contributor and language
"""
q_statement = models.Q(lang__exact=lang)
if user_id:
q_statement &= models.Q(contributor__exact=user_id)
if song_id:
q_statement &= models.Q(song__exact=song_id)
translations = Translation.objects.filter(q_statement).all()
latest_translations = {}
for t in translations:
k = (t.song.id, t.line_no)
curr = latest_translations.get(k)
if not curr or curr.created_at < t.created_at:
latest_translations[k] = t
return latest_translations
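# for every (song, line) with both a hanzi and a tailo entry, save the latest hanzi text as a new tailo translation and vice versa (or only print in dryrun mode)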
def handle(self, *args, **options):
hanzi_translations = self.get_latest_translations(
lang="hanzi", user_id=options["user_id"], song_id=options["song_id"]
)
tailo_translations = self.get_latest_translations(
lang="tailo", user_id=options["user_id"], song_id=options["song_id"]
)
for song_id, line_no in sorted(hanzi_translations.keys()):
k = (song_id, line_no)
song = hanzi_translations[k].song
contributor = hanzi_translations[k].contributor
if k not in tailo_translations:
continue
if options["dryrun"]:
print(
"switch",
song.id,
line_no,
hanzi_translations[k].content,
tailo_translations[k].content,
"(dryrun)",
)
else:
# save hanzi as new tailo
Translation(
song=song,
line_no=line_no,
lang="tailo",
content=hanzi_translations[k].content,
contributor=contributor,
).save()
# save tailo as new hanzi
Translation(
song=song,
line_no=line_no,
lang="hanzi",
content=tailo_translations[k].content,
contributor=contributor,
).save()
# log
print(
"switch",
song.id,
line_no,
hanzi_translations[k].content,
tailo_translations[k].content,
)
| en | 0.752522 | Return map {(song_id, line_no): latest_translation} of given contributor and language # save hanzi as new tailo # save tailo as new hanzi # log | 2.225453 | 2 |
src/bjointsp/heuristic/heuristic.py | 5GCity/5GCity-resource-placement | 0 | 6630138 | # embedding procedure
import math
import logging
import random
from collections import OrderedDict # for deterministic behavior
from bjointsp.overlay.edge import Edge
from bjointsp.overlay.instance import Instance
from bjointsp.overlay.overlay import Overlay
# global variables for easy access by all functions
nodes, links, shortest_paths, overlays = None, None, None, None
# return the outgoing arc of the specified component at the specified output in the specified direction
def out_arc(template, component, output, direction):
out_arcs = [a for a in template.arcs if a.starts_at(direction, output, component)]
# there has to be exactly one arc per input and output; but the arc might belong to another template
if len(out_arcs) == 1:
return out_arcs[0]
elif len(out_arcs) == 0:
return None
else:
raise ValueError("#outgoing arcs of {} at {} output {} is {}. It should be at most 1 per output and template."
.format(component, direction, output, len(out_arcs)))
# remove the specified instance and its in- and outgoing edges from all overlays/specified overlay
# if the instance is stateful, also remove it from passed_stateful of all flows
def remove_instance(instance, overlay=None):
# if an overlay is specified, only remove from that overlay; else from all
if overlay is not None:
overlays_to_update = [overlay]
else:
overlays_to_update = overlays.values()
# remove instance and associated edges from overlays_to_update and update flows
for ol in overlays_to_update:
flows_to_update = [f for e in ol.edges for f in e.flows if instance in f.passed_stateful.values()]
for f in flows_to_update:
f.passed_stateful = {k:v for k, v in f.passed_stateful.items() if v != instance}
if instance in ol.instances:
ol.instances = [i for i in ol.instances if i != instance]
#print("\tRemoved instance {} from overlay of {}".format(instance, ol.template))
logging.info("\tRemoved instance {} from overlay of {}".format(instance, ol.template))
edges_to_remove = [e for e in ol.edges if e.source == instance or e.dest == instance]
for e in edges_to_remove:
remove_edge(e, overlay)
# remove the specified edge from all overlays/specified overlay and instances
def remove_edge(edge, overlay=None):
# remove mapped dr
for f in edge.flows:
del f.dr[edge]
# remove edge from specified overlay or from all (if none is specified) and update flows
for ol in overlays.values():
if ol == overlay or overlay is None:
if edge in ol.edges:
ol.edges.remove(edge)
for i in ol.instances:
i.edges_in = {key: e for key, e in i.edges_in.items() if e != edge}
i.edges_out = {key: e for key, e in i.edges_out.items() if e != edge}
#print("\tRemoved edge {}".format(edge))
logging.info("\tRemoved edge {}".format(edge))
# remove specified flow: remove mapping from/to edges, remove edges that are now "empty" (without mapped flows)
def remove_flow(overlay, flow):
#print("Removing outdated flow {} and corresponding edges (without other flows)".format(flow))
logging.info("Removing outdated flow {} and corresponding edges (without other flows)".format(flow))
for e in list(overlay.edges): # iterate over copy as edges are removed during loop
# remove mappings
if flow in e.flows:
e.flows.remove(flow)
del flow.dr[e]
# remove empty edges
if not e.flows:
remove_edge(e, overlay)
# return dict of currently consumed node resources
# ignore the idle cpu/mem consumption of the instances of component specified in ignore_idle
def consumed_node_resources(ignore_idle=None):
consumed_cpu, consumed_mem = {}, {}
# reused instances exist in multiple overlays with diff ingoing edges -> have to allow duplicates (use list)
instances = [i for t in overlays.keys() for i in overlays[t].instances]
for v in nodes.ids:
consumed_cpu[v] = sum(i.consumed_cpu(ignore_idle) for i in instances if i.location == v)
consumed_mem[v] = sum(i.consumed_mem(ignore_idle) for i in instances if i.location == v)
return consumed_cpu, consumed_mem
# return dict of nodes with enough remaining node resources (based on delta_dr and the components requirements)
# ignoring nodes that are too far away, i.e., with a too high delay, and that are on the tabu list
# keys: nodes, values: (remaining cpu, remaining mem)
def candidate_nodes(start_node, arc, delta_dr, tabu=set()):
# increase ingoing dr: delta_dr at corresponding input, 0 elsewhere
delta_in_dr = []
for i in range(arc.dest.inputs + arc.dest.inputs_back):
if arc.direction == "forward" and i == arc.dest_in:
delta_in_dr.append(delta_dr)
elif arc.direction == "backward" and i == arc.dest.inputs + arc.dest_in:
delta_in_dr.append(delta_dr)
else:
delta_in_dr.append(0)
# get currently consumed node resources without idle consumption of dest-instances (to avoid subtracting it twice)
consumed_cpu, consumed_mem = consumed_node_resources(arc.dest)
# only consider nodes that are close enough (short delay) and that are not on the tabu list for the component
allowed_nodes = [v for v in nodes.ids if shortest_paths[(start_node, v)][2] <= arc.max_delay and (arc.dest, v) not in tabu]
# check each node and add it if it has any of the required resources remaining
candidates = OrderedDict()
for v in allowed_nodes:
remaining_cpu = nodes.cpu[v] - consumed_cpu[v]
remaining_mem = nodes.mem[v] - consumed_mem[v]
if remaining_cpu - arc.dest.cpu_req(delta_in_dr) >= 0 and remaining_mem - arc.dest.mem_req(delta_in_dr) >= 0:
candidates[v] = (remaining_cpu, remaining_mem)
return candidates
# return the best node to create an edge to (from a given location, along a given arc, excluding the tabu-instance)
# FUTURE WORK: favor nodes with suitable instances -> encourage reuse of existing instances -> better objective 2
def find_best_node(overlay, start_location, arc, delta_dr, fixed, tabu):
# candidate nodes with enough remaining node capacity
candidates = candidate_nodes(start_location, arc, delta_dr, tabu)
#print("\tCandidate nodes for component {}:".format(arc.dest))
logging.debug("\tCandidate nodes for component {}:".format(arc.dest))
for v in candidates.keys():
#print("\t\t{} with {}".format(v, candidates[v]))
logging.debug("\t\t{} with {}".format(v, candidates[v]))
# fixed instances need special treatment: cannot be added or removed => enforce reuse
if fixed:
#print("Component {} has fixed instances, which have to be used (no new instances allowed)".format(arc.dest))
logging.info("Component {} has fixed instances, which have to be used (no new instances allowed)".format(arc.dest))
fixed_nodes = [i.location for i in overlay.instances if i.component == arc.dest and
shortest_paths[(start_location, i.location)][2] <= arc.max_delay]
candidates = {node: resources for node, resources in candidates.items() if node in fixed_nodes}
# check all candidate nodes and place instance at node with lowest resulting path-weight (high dr, low delay)
if len(candidates) > 0:
path_weight = OrderedDict()
for v in candidates.keys():
path_weight[v] = shortest_paths[(start_location, v)][1]
best_node = min(path_weight, key=path_weight.get)
# if no nodes have remaining capacity, choose node with lowest over-subscription (within delay bounds)
else:
#print("No nodes with enough remaining resources. Choosing node with lowest over-subscription.")
logging.info("No nodes enough remaining resources. Choosing node with lowest over-subscription.")
consumed_cpu, consumed_mem = consumed_node_resources()
best_node = None
min_over_subscription = math.inf
min_path_weight = math.inf # path weight of current best node, use as tie breaker
# only allow nodes that are close enough, i.e., with low enough delay, and that are not tabu
allowed_nodes = [v for v in nodes.ids if shortest_paths[(start_location, v)][2] <= arc.max_delay
and (arc.dest, v) not in tabu]
# if fixed, only allow nodes of fixed instances => enforce reuse
if fixed:
allowed_nodes = fixed_nodes
for v in allowed_nodes:
# looking at sum of cpu and memory over-subscription to find nodes with little over-sub of both
over_subscription = (consumed_cpu[v] - nodes.cpu[v]) + (consumed_mem[v] - nodes.mem[v])
if over_subscription <= min_over_subscription:
path_weight = shortest_paths[(start_location, v)][1]
if over_subscription < min_over_subscription or path_weight < min_path_weight:
best_node = v
min_over_subscription = over_subscription
min_path_weight = path_weight
return best_node
# map the specified flow (with specified flow_dr) to a possibly new edge from the start_instance
def map_flow2edge(overlay, start_instance, arc, flow, flow_dr, tabu):
# determine if the instances of the destination component are fixed => if so, cannot place new instances
fixed = False
for i in overlay.instances:
if i.component == arc.dest and i.fixed:
fixed = True
break
best_node = find_best_node(overlay, start_instance.location, arc, flow_dr, fixed, tabu)
# if the instance at best node already exists (e.g., from forward dir), just connect to it, else create anew
# look for existing instance
instance_exists = False
for i in overlay.instances:
if i.component == arc.dest and i.location == best_node:
instance_exists = True
dest_instance = i
break
# create new instance if none exists in the overlay
if not instance_exists:
dest_instance = Instance(arc.dest, best_node)
overlay.instances.append(dest_instance)
#print("\tAdded new instance {} at best node {} (may exist in other overlays)".format(dest_instance, best_node))
logging.info("\tAdded new instance {} at best node {} (may exist in other overlays)".format(dest_instance, best_node))
# check if edge to dest_instance already exists
edge_exists = False
if instance_exists:
if dest_instance in start_instance.edges_out.keys():
edge_exists = True
edge = start_instance.edges_out[dest_instance]
# if it doesn't exist, create a new edge and assign a path (shortest path)
if not edge_exists:
edge = Edge(arc, start_instance, dest_instance)
overlay.edges.append(edge)
edge.paths.append(shortest_paths[(start_instance.location, dest_instance.location)][0])
# map flow to edge
flow.dr[edge] = flow_dr
edge.flows.append(flow)
#print("\tMapped flow {} (dr {}) to edge {} (new: {})".format(flow, flow_dr, edge, not edge_exists))
logging.info("\tMapped flow {} (dr {}) to edge {} (new: {})".format(flow, flow_dr, edge, not edge_exists))
# map out_flows to edges back to the same stateful instances that were passed in fwd direction
def map_flows2stateful(overlay, start_instance, arc, out_flows):
# remove any existing mappings of flows to edges along the arc
for e in start_instance.edges_out.values():
if e.arc == arc:
e.flows = []
# add currently outgoing flows to edges back to stateful instances (create edges if necessary)
for f in out_flows:
dest_inst = f.passed_stateful[arc.dest]
if dest_inst in start_instance.edges_out.keys():
new_edge = False
edge = start_instance.edges_out[dest_inst]
else:
new_edge = True
edge = Edge(arc, start_instance, dest_inst)
edge.paths.append(shortest_paths[(start_instance.location, dest_inst.location)][0])
overlay.edges.append(edge)
f.dr[edge] = out_flows[f]
edge.flows.append(f)
#print("\tMapped flow {} (dr {}) to edge {} (new: {}) back to same stateful instance".format(f, out_flows[f], edge, new_edge))
logging.info("\tMapped flow {} (dr {}) to edge {} (new: {}) back to same stateful instance".format(f, out_flows[f], edge, new_edge))
# update the mapping of flows leaving the start_instances along the specified edge
def update_flow_mapping(overlay, start_instance, arc, out_flows, tabu):
flow_mapping = {f: e for e in start_instance.edges_out.values() if e.arc == arc for f in e.flows}
# remove outdated flows
for f in list(flow_mapping.keys()):
if f not in out_flows:
del f.dr[flow_mapping[f]]
flow_mapping[f].flows.remove(f)
del flow_mapping[f]
#print("\tRemoved outdated flow {} along {}".format(f, arc))
# enforce return of flows to the same stateful instances as passed in fwd direction
if arc.dest.stateful and arc.direction == "backward":
map_flows2stateful(overlay, start_instance, arc, out_flows)
# update dr of mapped flows and map new ones
else:
# sort flows for determinism and reproducibility (same results with same key)
ordered_flows = [f for f in sorted(out_flows, key=lambda flow: flow.id)]
# shuffle order to achieve different order of mapping in different iterations; maintains determinism and reproducibility (due to same key)
random.shuffle(ordered_flows)
for f in ordered_flows: # sort according to flow.id to ensure determinism
if f in flow_mapping:
f.dr[flow_mapping[f]] = out_flows[f] # update data rate
#print("\tUpdated dr of existing flow {} (Now: {})".format(f, f.dr[flow_mapping[f]]))
# FUTURE WORK: maybe check if capacity violated => if yes, reassign flow to different edge; but might also be fixed during iterative improvement
else:
map_flow2edge(overlay, start_instance, arc, f, out_flows[f], tabu)
# FUTURE WORK: maybe try to minimize number of edges or number of new edges by combining flows to one edge or preferring existing edges (objective 2)
# remove empty edges
for e in start_instance.edges_out.values():
if e.arc == arc and not e.flows:
#print("\nRemoved empty edge {}".format(e))
logging.info("\nRemoved empty edge {}".format(e))
remove_edge(e, overlay)
# update sources (add, rem), update source flows, reset passed_stateful of all flows
def update_sources(overlay, sources):
# reset passed_stateful for all flows (set up to date later) and remove outdated flows
#print("Reset passed_stateful for all flows of template {}".format(overlay.template))
src_flows = {f for src in sources for f in src.flows}
mapped_flows = {f for e in overlay.edges for f in e.flows} | {f for src in sources for f in src.flows}
for f in mapped_flows:
f.passed_stateful.clear()
if f not in src_flows:
remove_flow(overlay, f)
# add/update source instances
for src in sources:
# get existing source instance at the location
src_exists = False
for i in overlay.instances:
if i.component == src.component and i.location == src.location:
src_exists = True
break
# update or add source instance depending on whether such an instance already exists or not
if src_exists:
# remove outdated flows
for f in i.src_flows:
if f not in src.flows:
i.src_flows.remove(f)
for e in f.dr:
e.flows.remove(f)
f.dr.clear()
f.passed_stateful.clear()
# update or add new flows
for f in src.flows:
# if the flow already exists, keep the existing flow and only update its src_dr
if f in i.src_flows:
new_src_dr = f.src_dr
f = i.src_flows[i.src_flows.index(f)] # get existing flow object in i.src_flows
f.src_dr = new_src_dr
# else add the new flow
else:
i.src_flows.append(f)
f.passed_stateful[i.component] = i
#print("Updated/checked src_flows of existing source instance {}".format(i))
logging.info("Updated/checked src_flows of existing source instance {}".format(i))
else:
src_instance = Instance(src.component, src.location, src.flows)
overlay.instances.append(src_instance)
#print("Added new source instance {}".format(src_instance))
logging.info("Added new source instance {}".format(src_instance))
# remove old source instances without source
source_instances = [i for i in overlay.instances if i.component.source]
for src in source_instances:
corresponding_sources = {s for s in sources if s.component == src.component and s.location == src.location}
if len(corresponding_sources) == 0:
#print("Remove source instance {} without corresponding source".format(src))
logging.info("Remove source instance {} without corresponding source".format(src))
remove_instance(src)
# create an initial solution for the provided input
def solve(arg_nodes, arg_links, templates, prev_overlays, sources, fixed, arg_shortest_paths, tabu=set()):
# print("Previous overlays:")
# for ol in prev_overlays.values():
# ol.print()
# tabu_string = ""
# for i in tabu:
# tabu_string += "({},{}) ".format(i[0], i[1])
# print("Tabu list: {}".format(tabu_string))
# write global variables
global nodes, links, shortest_paths, overlays
nodes = arg_nodes
links = arg_links
shortest_paths = arg_shortest_paths
# keep previous overlays of templates that still exist
overlays = {t: ol for t, ol in prev_overlays.items() if t in templates}
# create empty overlays for new templates
for t in templates:
if t not in overlays.keys():
overlays[t] = Overlay(t, [], [])
#print("Created empty overlay for new template {}".format(t))
logging.info("Created empty overlay for new template {}".format(t))
# remove all instances of fixed components => curr fixed instances added again later; prev fixed instances removed
fixed_components = {f.component for f in fixed}
fixed_instances = {i for ol in overlays.values() for i in ol.instances if i.component in fixed_components}
#print("Remove any existing fixed instances:", *fixed_instances, sep=" ")
for i in fixed_instances:
remove_instance(i)
# embed templates sequentially in given order
for t in templates:
#print("\n-Embedding template: {}-".format(t))
logging.info("-Embedding template: {}-".format(t))
own_sources = [src for src in sources if src.component in t.components]
update_sources(overlays[t], own_sources)
# add fixed instances that match template t's components
for f in fixed:
if f.component in t.components:
fixed_instance = Instance(f.component, f.location, fixed=True)
if fixed_instance not in overlays[t].instances:
overlays[t].instances.append(fixed_instance)
#print("Added fixed instance of {} at {}".format(f.component, f.location))
logging.info("Added fixed instance of {} at {}".format(f.component, f.location))
# iterate over all instances in topological order; start in forward direction then switch to backward
i = 0
direction = "forward"
while i < len(overlays[t].topological_order()):
instance = overlays[t].topological_order()[i]
# #print("Topological order:", *overlays[t].topological_order(), sep=" ")
# remove unused instances (except fixed instances)
if not instance.fixed:
if not instance.used(direction, overlays[t]):
#print("Removed unused instance {} from overlay of {}".format(instance, t))
logging.info("Removed unused instance {} from overlay of {}".format(instance, t))
remove_instance(instance, overlays[t])
continue
# switch direction at the first instance of an end component (bc outgoing not ingoing direction considered)
if instance.component.end:
direction = "backward"
# get outgoing flows (and their dr) for each output
out_flows = instance.out_flows(direction)
for k in range(len(out_flows)):
arc = out_arc(t, instance.component, k, direction)
# when a component is adapted for reuse, it has separate outputs for the arcs of different templates
if arc is None: # for output k, this template has no arc => skip to next output
#print("{}'s outgoing arc at output {} in {} direction belongs to a different template. The output is skipped".format(instance, k, direction))
logging.debug("{}'s outgoing arc at output {} in {} direction belongs to a different template. The output is skipped".format(instance, k, direction))
continue
update_flow_mapping(overlays[t], instance, arc, out_flows[k], tabu)
#print("Updated the flow mapping along arc {} at {}\n".format(arc, instance))
logging.info("Updated the flow mapping along arc {} at {}\n".format(arc, instance))
i += 1
#print()
if overlays[t].empty():
del overlays[t]
#print("Deleted empty overlay of {}".format(t))
logging.info("Deleted empty overlay of {}".format(t))
# else:
#overlays[t].print()
#print("Topological order:", *overlays[t].topological_order(), sep=" ")
#print()
return overlays
| en | 0.767519 | # embedding procedure # for deterministic behavior # global variables for easy access by all functions # return the outgoing arc of the specified component at the specified output in the specified direction # there has to be exactly one arc per input and output; but the arc might belong to another template # remove the specified instance and its in- and outgoing edges from all overlays/specified overlay # if the instance is stateful, also remove it from passed_stateful of all flows # if an overlay is specified, only remove from that overlay; else from all # remove instance and associated edges from overlays_to_update and update flows #print("\tRemoved instance {} from overlay of {}".format(instance, ol.template)) # remove the specified edge from all overlays/specified overlay and instances # remove mapped dr # remove edge from specified overlay or from all (if none is specified) and update flows #print("\tRemoved edge {}".format(edge)) # remove specified flow: remove mapping from/to edges, remove edges that are now "empty" (without mapped flows) #print("Removing outdated flow {} and corresponding edges (without other flows)".format(flow)) # iterate over copy as edges are removed during loop # remove mappings # remove empty edges # return dict of currently consumed node resources # ignore the idle cpu/mem consumption of the instances of component specified in ignore_idle # reused instances exist in multiple overlays with diff ingoing edges -> have to allow duplicates (use list) # return dict of nodes with enough remaining node resources (based on delta_dr and the components requirements) # ignoring nodes that are too far away, i.e., with a too high delay, and that are on the tabu list # keys: nodes, values: (remaining cpu, remaining mem) # increase ingoing dr: delta_dr at corresponding input, 0 elsewhere # get currently consumed node resources without idle consumption of dest-instances (to avoid subtracting it twice) # only consider nodes that are close enough (short delay) and that are not on the tabu list for the component # check each node and add it if it has any of the required resources remaining # return the best node to create an edge to (from a given location, along a given arc, excluding the tabu-instance) # FUTURE WORK: favor nodes with suitable instances -> encourage reuse of existing instances -> better objective 2 # candidate nodes with enough remaining node capacity #print("\tCandidate nodes for component {}:".format(arc.dest)) #print("\t\t{} with {}".format(v, candidates[v])) # fixed instances need special treatment: cannot be added or removed => enforce reuse #print("Component {} has fixed instances, which have to be used (no new instances allowed)".format(arc.dest)) # check all candidate nodes and place instance at node with lowest resulting path-weight (high dr, low delay) # if no nodes have remaining capacity, choose node with lowest over-subscription (within delay bounds) #print("No nodes with enough remaining resources. 
Choosing node with lowest over-subscription.") # path weight of current best node, use as tie breaker # only allow nodes that are close enough, i.e., with low enough delay, and that are not tabu # if fixed, only allow nodes of fixed instances => enforce reuse # looking at sum of cpu and memory over-subscription to find nodes with little over-sub of both # map the specified flow (with specified flow_dr) to a possibly new edge from the start_instance # determine if the instances of the destination component are fixed => if so, cannot place new instances # if the instance at best node already exists (e.g., from forward dir), just connect to it, else create anew # look for existing instance # create new instance if none exists in the overlay #print("\tAdded new instance {} at best node {} (may exist in other overlays)".format(dest_instance, best_node)) # check if edge to dest_instance already exists # if it doesn't exist, create a new edge and assign a path (shortest path) # map flow to edge #print("\tMapped flow {} (dr {}) to edge {} (new: {})".format(flow, flow_dr, edge, not edge_exists)) # map out_flows to edges back to the same stateful instances that were passed in fwd direction # remove any existing mappings of flows to edges along the arc # add currently outgoing flows to edges back to stateful instances (create edges if necessary) #print("\tMapped flow {} (dr {}) to edge {} (new: {}) back to same stateful instance".format(f, out_flows[f], edge, new_edge)) # update the mapping of flows leaving the start_instances along the specified edge # remove outdated flows #print("\tRemoved outdated flow {} along {}".format(f, arc)) # enforce return of flows to the same stateful instances as passed in fwd direction # update dr of mapped flows and map new ones # sort flows for determinism and reproducibility (same results with same key) # shuffle order to achieve different order of mapping in different iterations; maintains determinism and reproducibility (due to same key) # sort according to flow.id to ensure determinism # update data rate #print("\tUpdated dr of existing flow {} (Now: {})".format(f, f.dr[flow_mapping[f]])) # FUTURE WORK: maybe check if capacitiy violated => if yes, reassign flow to different edge; but might also be fixed during iterative improvement # FUTURE WORK: maybe try to minimize number of edges or number of new edges by combining flows to one edge or preferring existing edges (opj 2) # remove empty edges #print("\nRemoved empty edge {}".format(e)) # update sources (add, rem), update source flows, reset passed_stateful of all flows # reset passed_stateful for all flows (set up to date later) and remove outdated flows #print("Reset passed_stateful for all flows of template {}".format(overlay.template)) # add/update source instances # get existing source instance at the location # update or add source instance depending on whether such an instance already exists or not # remove outdated flows # update or add new flows # if the flow already exists, keep the existing flow and only update its src_dr # get existing flow object in i.src_flows # else add the new flow #print("Updated/checked src_flows of existing source instance {}".format(i)) #print("Added new source instance {}".format(src_instance)) # remove old source instances without source #print("Remove source instance {} without corresponding source".format(src)) # create an initial solution for the provided input # print("Previous overlays:") # for ol in prev_overlays.values(): # ol.print() # tabu_string = "" # for i in 
tabu: # tabu_string += "({},{}) ".format(i[0], i[1]) # print("Tabu list: {}".format(tabu_string)) # write global variables # keep previous overlays of templates that still exist # create empty overlays for new templates #print("Created empty overlay for new template {}".format(t)) # remove all instances of fixed components => curr fixed instances added again later; prev fixed instances removed #print("Remove any existing fixed instances:", *fixed_instances, sep=" ") # embed templates sequentially in given order #print("\n-Embedding template: {}-".format(t)) # add fixed instances that match template t's components #print("Added fixed instance of {} at {}".format(f.component, f.location)) # iterate over all instances in topological order; start in forward direction then switch to backward # #print("Topological order:", *overlays[t].topological_order(), sep=" ") # remove unused instances (except fixed instances) #print("Removed unused instance {} from overlay of {}".format(instance, t)) # switch direction at the first instance of an end component (bc outgoing not ingoing direction considered) # get outgoing flows (and their dr) for each output # when a component is adapted for reuse, it has separate outputs for the arcs of different templates # for output k, this template has no arc => skip to next output #print("{}'s outgoing arc at output {} in {} direction belongs to a different template. The output is skipped".format(instance, k, direction)) #print("Updated the flow mapping along arc {} at {}\n".format(arc, instance)) #print() #print("Deleted empty overlay of {}".format(t)) # else: #overlays[t].print() #print("Topological order:", *overlays[t].topological_order(), sep=" ") #print() | 2.478832 | 2 |
berdi/Section_04_Final_Data_Merge_and_Visualization/bundle/bundle_utilites.py | iVibudh/CER-ESA-Phase2 | 2 | 6630139 | <gh_stars>1-10
import os
import shutil
def bundle_for_project(df_index, project_folder_name, new_folder_projects, csv_file_folder, columns_index, readme_project_filepath, is_french=False):
print('Start processing for project: {}'.format(project_folder_name))
if is_french:
column_name_download_folder_name = 'Télécharger le nom du dossier'
column_name_filename = 'Nom du CSV'
column_name_pdf_page_number = 'Numéro de page PDF'
index_filename = 'INDEX_PROJET.csv'
encode = 'latin-1'
else:
column_name_download_folder_name = 'Download folder name'
column_name_filename = 'filename'
column_name_pdf_page_number = 'PDF Page Number'
index_filename = 'INDEX_PROJECT.csv'
encode = 'utf-8'
df_project = df_index[df_index[column_name_download_folder_name] == project_folder_name]
# Create new project folder
new_project_folder = os.path.join(new_folder_projects, project_folder_name)
if not os.path.exists(new_project_folder):
os.mkdir(new_project_folder)
# Iterate over the table ids and create zip files of tables
for table_id in df_project['ID'].unique():
# Create a temporary folder in the new project folder for zipping csv files
temp_folder_for_bundling = os.path.join(new_project_folder, 'temp-{}'.format(table_id))
if not os.path.exists(temp_folder_for_bundling):
os.mkdir(temp_folder_for_bundling)
# Copy the csv files of one table to the temporary folder in the new project folder
df_table = df_project[df_project['ID'] == table_id]
for csv in df_table[column_name_filename]:
if os.path.exists(os.path.join(csv_file_folder, csv)):
shutil.copy(os.path.join(csv_file_folder, csv), os.path.join(temp_folder_for_bundling, csv))
else:
print('File missing: {}'.format(os.path.join(csv_file_folder, csv)))
# Create a zip file of the table csvs
zipfile_name = df_table['Table Name'].iloc[0]
shutil.make_archive(os.path.join(new_project_folder, zipfile_name), 'zip', temp_folder_for_bundling)
# Delete the temporary folder after zipping csv files of a table
shutil.rmtree(temp_folder_for_bundling, ignore_errors=True)
# Create index file for the project
df_project_index = df_project.sort_values(['ID', column_name_pdf_page_number])\
.groupby('ID').first().reset_index()[columns_index]
df_project_index.to_csv(os.path.join(new_project_folder, index_filename), index=False, encoding=encode)
# Create readme.txt
shutil.copy(readme_project_filepath, os.path.join(new_project_folder, 'readme.txt'))
# Create project zip file
shutil.make_archive(new_project_folder, 'zip', new_folder_projects, project_folder_name)
# Delete project folder
shutil.rmtree(new_project_folder, ignore_errors=True)
def bundle_for_table(df_index, table_id, new_folder_tables, csv_file_folder, columns_index, readme_project_filepath, is_french=False):
print('Start processing table - table id: {}'.format(table_id))
if is_french:
column_name_filename = 'Nom du CSV'
column_name_pdf_page_number = 'Numéro de page PDF'
else:
column_name_filename = 'filename'
column_name_pdf_page_number = 'PDF Page Number'
df_table = df_index[df_index['ID'] == table_id]
# Create a temporary folder in the new tables folder for zipping csv files
temp_folder_for_bundling = os.path.join(new_folder_tables, 'temp-{}'.format(table_id))
if not os.path.exists(temp_folder_for_bundling):
os.mkdir(temp_folder_for_bundling)
# Copy the csv files of one table to the temporary folder in the new tables folder
for _, row in df_table.iterrows():
csv = row[column_name_filename]
if os.path.exists(os.path.join(csv_file_folder, csv)):
shutil.copy(os.path.join(csv_file_folder, csv),
os.path.join(temp_folder_for_bundling, csv))
else:
print('File missing: {}'.format(os.path.join(csv_file_folder, csv)))
    # Create readme.txt by appending table metadata to the generic readme file
readme_table_filepath = os.path.join(temp_folder_for_bundling, 'readme.txt')
shutil.copy(readme_project_filepath, readme_table_filepath)
with open(readme_table_filepath, 'a', encoding="utf-8") as file:
metadata = ''
for col in columns_index:
if col == column_name_pdf_page_number and df_table[col].min() != df_table[col].max():
metadata += '{}: {} - {}\n'.format(col, df_table[col].min(), df_table[col].max())
else:
metadata += '{}: {}\n'.format(col, df_table.iloc[0][col])
file.write(metadata)
# Create a zip file of the table csvs and readme.txt
zipfile_name = df_table['Table Name'].iloc[0]
shutil.make_archive(os.path.join(new_folder_tables, zipfile_name), 'zip', temp_folder_for_bundling)
# Delete temp folder
shutil.rmtree(temp_folder_for_bundling, ignore_errors=True)
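# --- Illustrative usage sketch added by the editor; not part of the original module. ---
# All file paths and the columns_index list below are hypothetical placeholders, and pandas
# is assumed to be available since df_index is treated as a DataFrame by the functions above.
def _example_bundle_run():
    import pandas as pd
    df_index = pd.read_csv("INDEX_ALL.csv")  # hypothetical master index file
    columns_index = ["ID", "Table Name", "filename", "PDF Page Number"]  # hypothetical subset
    bundle_for_project(df_index,
                       project_folder_name="example_project",      # hypothetical folder name
                       new_folder_projects="output/projects",
                       csv_file_folder="input/csv",
                       columns_index=columns_index,
                       readme_project_filepath="templates/readme.txt")
    bundle_for_table(df_index,
                     table_id=df_index["ID"].iloc[0],
                     new_folder_tables="output/tables",
                     csv_file_folder="input/csv",
                     columns_index=columns_index,
                     readme_project_filepath="templates/readme.txt")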
| import os
import shutil
def bundle_for_project(df_index, project_folder_name, new_folder_projects, csv_file_folder, columns_index, readme_project_filepath, is_french=False):
print('Start processing for project: {}'.format(project_folder_name))
if is_french:
column_name_download_folder_name = 'Télécharger le nom du dossier'
column_name_filename = 'Nom du CSV'
column_name_pdf_page_number = 'Numéro de page PDF'
index_filename = 'INDEX_PROJET.csv'
encode = 'latin-1'
else:
column_name_download_folder_name = 'Download folder name'
column_name_filename = 'filename'
column_name_pdf_page_number = 'PDF Page Number'
index_filename = 'INDEX_PROJECT.csv'
encode = 'utf-8'
df_project = df_index[df_index[column_name_download_folder_name] == project_folder_name]
# Create new project folder
new_project_folder = os.path.join(new_folder_projects, project_folder_name)
if not os.path.exists(new_project_folder):
os.mkdir(new_project_folder)
# Iterate over the table ids and create zip files of tables
for table_id in df_project['ID'].unique():
# Create a temporary folder in the new project folder for zipping csv files
temp_folder_for_bundling = os.path.join(new_project_folder, 'temp-{}'.format(table_id))
if not os.path.exists(temp_folder_for_bundling):
os.mkdir(temp_folder_for_bundling)
# Copy the csv files of one table to the temporary folder in the new project folder
df_table = df_project[df_project['ID'] == table_id]
for csv in df_table[column_name_filename]:
if os.path.exists(os.path.join(csv_file_folder, csv)):
shutil.copy(os.path.join(csv_file_folder, csv), os.path.join(temp_folder_for_bundling, csv))
else:
print('File missing: {}'.format(os.path.join(csv_file_folder, csv)))
# Create a zip file of the table csvs
zipfile_name = df_table['Table Name'].iloc[0]
shutil.make_archive(os.path.join(new_project_folder, zipfile_name), 'zip', temp_folder_for_bundling)
# Delete the temporary folder after zipping csv files of a table
shutil.rmtree(temp_folder_for_bundling, ignore_errors=True)
# Create index file for the project
df_project_index = df_project.sort_values(['ID', column_name_pdf_page_number])\
.groupby('ID').first().reset_index()[columns_index]
df_project_index.to_csv(os.path.join(new_project_folder, index_filename), index=False, encoding=encode)
# Create readme.txt
shutil.copy(readme_project_filepath, os.path.join(new_project_folder, 'readme.txt'))
# Create project zip file
shutil.make_archive(new_project_folder, 'zip', new_folder_projects, project_folder_name)
# Delete project folder
shutil.rmtree(new_project_folder, ignore_errors=True)
def bundle_for_table(df_index, table_id, new_folder_tables, csv_file_folder, columns_index, readme_project_filepath, is_french=False):
print('Start processing table - table id: {}'.format(table_id))
if is_french:
column_name_filename = 'Nom du CSV'
column_name_pdf_page_number = 'Numéro de page PDF'
else:
column_name_filename = 'filename'
column_name_pdf_page_number = 'PDF Page Number'
df_table = df_index[df_index['ID'] == table_id]
# Create a temporary folder in the new tables folder for zipping csv files
temp_folder_for_bundling = os.path.join(new_folder_tables, 'temp-{}'.format(table_id))
if not os.path.exists(temp_folder_for_bundling):
os.mkdir(temp_folder_for_bundling)
# Copy the csv files of one table to the temporary folder in the new tables folder
for _, row in df_table.iterrows():
csv = row[column_name_filename]
if os.path.exists(os.path.join(csv_file_folder, csv)):
shutil.copy(os.path.join(csv_file_folder, csv),
os.path.join(temp_folder_for_bundling, csv))
else:
print('File missing: {}'.format(os.path.join(csv_file_folder, csv)))
    # Create readme.txt by appending table metadata to the generic readme file
readme_table_filepath = os.path.join(temp_folder_for_bundling, 'readme.txt')
shutil.copy(readme_project_filepath, readme_table_filepath)
with open(readme_table_filepath, 'a', encoding="utf-8") as file:
metadata = ''
for col in columns_index:
if col == column_name_pdf_page_number and df_table[col].min() != df_table[col].max():
metadata += '{}: {} - {}\n'.format(col, df_table[col].min(), df_table[col].max())
else:
metadata += '{}: {}\n'.format(col, df_table.iloc[0][col])
file.write(metadata)
# Create a zip file of the table csvs and readme.txt
zipfile_name = df_table['Table Name'].iloc[0]
shutil.make_archive(os.path.join(new_folder_tables, zipfile_name), 'zip', temp_folder_for_bundling)
# Delete temp folder
shutil.rmtree(temp_folder_for_bundling, ignore_errors=True) | en | 0.782464 | # Create new project folder # Iterate over the table ids and create zip files of tables # Create a temporary folder in the new project folder for zipping csv files # Copy the csv files of one table to the temporary folder in the new project folder # Create a zip file of the table csvs # Delete the temporary folder after zipping csv files of a table # Create index file for the project # Create readme.txt # Create project zip file # Delete project folder # Create a temporary folder in the new tables folder for zipping csv files # Copy the csv files of one table to the temporary folder in the new tables folder # Create readme.txt by append table metadata to the generic readme file # Create a zip file of the table csvs and readme.txt # Delete temp folder | 3.068105 | 3 |
weblogo/logo.py | ghuls/weblogo | 108 | 6630140 | # -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 <NAME>
# Copyright (c) 2006-2015, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import os
import sys
from datetime import datetime
from io import StringIO
from math import log, sqrt
from typing.io import TextIO
from urllib.parse import urlparse, urlunparse
from urllib.request import Request, urlopen
import numpy as np
# Avoid 'from numpy import *' since numpy has lots of names defined
from numpy import any, array, asarray, float64, ones, zeros
from scipy.stats import entropy
from . import __version__, seq_io
from .color import Color
from .colorscheme import (
ColorScheme,
SymbolColor,
base_pairing,
charge,
chemistry,
hydrophobicity,
monochrome,
)
from .data import amino_acid_composition
from .logomath import Dirichlet
from .seq import (
Alphabet,
SeqList,
unambiguous_dna_alphabet,
unambiguous_protein_alphabet,
unambiguous_rna_alphabet,
)
from .utils import ArgumentError, isfloat, stdrepr
# Shorten development version string of the form weblogo-3.6.1.dev43+g64d9f12.d20190304
if __version__.find("+") != -1:
__version__ = __version__[: __version__.find("+")]
# from .logo_formatter import (GhostscriptAPI, pdf_formatter, jpeg_formatter, png_formatter,
# png_print_formatter,
# txt_formatter, eps_formatter, formatters, default_formatter)
# ------ META DATA ------
# __all__ = ['LogoOptions',
# 'description',
# '__version__',
# 'LogoFormat',
# 'LogoData',
# 'GhostscriptAPI',
# 'std_color_schemes',
# 'default_color_schemes',
# 'classic',
# 'std_units',
# 'std_sizes',
# 'std_alphabets',
# 'std_percentCG',
# 'pdf_formatter',
# 'jpeg_formatter',
# 'png_formatter',
# 'png_print_formatter',
# 'txt_formatter',
# 'eps_formatter',
# 'formatters',
# 'default_formatter',
# 'base_distribution',
# 'equiprobable_distribution',
# 'read_seq_data',
# 'Color',
# 'ColorScheme',
# 'parse_prior',
# 'release_description',
# 'description'
# ]
description = "Create sequence logos from biological sequence alignments."
release_description = "WebLogo %s" % (__version__)
def cgi(htdocs_directory): # pragma: no cover
import weblogo._cgi
weblogo._cgi.main(htdocs_directory)
aa_composition = [amino_acid_composition[_k] for _k in unambiguous_protein_alphabet]
# ------ DATA ------
classic = ColorScheme(
[
SymbolColor("G", "orange"),
SymbolColor("TU", "red"),
SymbolColor("C", "blue"),
SymbolColor("A", "green"),
]
)
std_color_schemes = {
"auto": None, # Depends on sequence type
"monochrome": monochrome,
"base pairing": base_pairing,
"classic": classic,
"hydrophobicity": hydrophobicity,
"chemistry": chemistry,
"charge": charge,
}
default_color_schemes = {
unambiguous_protein_alphabet: hydrophobicity,
unambiguous_rna_alphabet: base_pairing,
unambiguous_dna_alphabet: base_pairing,
}
std_units = {
"bits": 1.0 / log(2),
"nats": 1.0,
"digits": 1.0 / log(10),
"kT": 1.0,
"kJ/mol": 8.314472 * 298.15 / 1000.0,
"kcal/mol": 1.987 * 298.15 / 1000.0,
"probability": None,
}
# The base stack width is set equal to 9pt Courier.
# (Courier has a width equal to 3/5 of the point size.)
# Check that can get 80 characters in journal page @small
# 40 characters in a journal column
std_sizes = {"small": 5.4, "medium": 5.4 * 2, "large": 5.4 * 3}
std_alphabets = {
"protein": unambiguous_protein_alphabet,
"rna": unambiguous_rna_alphabet,
"dna": unambiguous_dna_alphabet,
}
std_percentCG = {
"H. sapiens": 40.0,
"E. coli": 50.5,
"S. cerevisiae": 38.0,
"C. elegans": 36.0,
"D. melanogaster": 43.0,
"M. musculus": 42.0,
"T. thermophilus": 69.4,
}
# Thermus thermophilus: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>: The genome sequence of the extreme
# thermophile Thermus thermophilus.
# Nat Biotechnol 2004, 22:547-53
class LogoOptions(object):
"""A container for all logo formatting options. Not all of these
are directly accessible through the CLI or web interfaces.
To display LogoOption defaults::
>>> from weblogo import *
>>> LogoOptions()
All physical lengths are measured in points. (72 points per inch, 28.3 points per cm)
Args:
creator_text: Embedded as comment in figures.
logo_title: Creates title for the sequence logo
logo_label: An optional figure label, added to the top left (e.g. '(a)').
unit_name: See std_units for options. (Default 'bits')
yaxis_label: Defaults to unit_name
xaxis_label: Add a label to the x-axis, or hide x-axis altogether.
fineprint: Defaults to WebLogo name and version
show_yaxis: Display entropy scale along y-axis (default: True)
show_xaxis: Display sequence numbers along x-axis (default: True)
show_ends: Display label at the ends of the sequence (default: False)
show_fineprint: Toggle display of the WebLogo version information in the lower
right corner. Optional, but we appreciate the acknowledgment.
        show_errorbars: Draw errorbars (default: True)
        show_boxes: Draw boxes around stack characters (default: False)
        debug: Draw extra graphics debugging information.
        rotate_numbers: Draw xaxis numbers with vertical orientation?
        scale_width: boolean, scale the width of characters in proportion to the
            fraction of non-gap symbols in the column
pad_right: Make a single line logo the same width as multiline logos
(default: False)
stacks_per_line: Maximum number of logo stacks per logo line. (Default: 40)
        yaxis_tic_interval: Distance between ticmarks on the y-axis (default: 1.0)
        yaxis_minor_tic_ratio: Ratio of the major tic interval to the minor tic
            interval (default: 5)
        yaxis_scale: Sets height of the y-axis in designated units
        xaxis_tic_interval: Distance between ticmarks on the x-axis (default: 1.0)
        number_interval: Distance between position-number labels on the x-axis (default: 5)
shrink_fraction: Proportional shrinkage of characters if show_boxes is true.
        errorbar_fraction: Visible fraction of the error bar (default: 0.90)
        errorbar_width_fraction: Relative width of the error bar (default: 0.25)
errorbar_gray: Sets error bars' gray scale percentage (default .75)
resolution: Dots per inch (default: 96). Used for bitmapped output
formats
default_color: Symbol color if not otherwise specified
color_scheme: A custom color scheme can be specified using CSS2 (Cascading
Style Sheet) syntax.
E.g. 'red', '#F00', '#FF0000', 'rgb(255, 0, 0)',
'rgb(100%, 0%, 0%)' or 'hsl(0, 100%, 50%)' for the color red.
stack_width: Scale the visible stack width by the fraction of symbols in
            the column (i.e. columns with many gaps or unknowns are
narrow.) (Default: yes)
stack_aspect_ratio: Ratio of stack height to width (default: 5)
logo_margin: Default: 2 pts
stroke_width: Default: 0.5 pts
tic_length: Default: 5 pts
stack_margin: Default: 0.5 pts
small_fontsize: Small text font size in points
fontsize: Regular text font size in points
title_fontsize: Title text font size in points
number_fontsize: Font size for axis-numbers, in points.
text_font: Select font for labels
logo_font: Select font for Logo
title_font: Select font for Logo's title
first_index: Index of first position in sequence data
logo_start: Lower bound of sequence to display
logo_end: Upper bound of sequence to display
"""
def __init__(self, **kwargs):
"""Create a new LogoOptions instance.
>>> logooptions = LogoOptions(logo_title = "Some Title String")
>>> logooptions.show_yaxis = False
>>> repr(logooptions)
"""
self.alphabet = None
self.creator_text = release_description
self.logo_title = ""
self.logo_label = ""
self.stacks_per_line = 40
self.unit_name = "bits"
self.show_yaxis = True
        # yaxis_label default depends on other settings. See LogoFormat
self.yaxis_label = None
self.yaxis_tic_interval = 1.0
self.yaxis_minor_tic_ratio = 5
self.yaxis_scale = None
self.show_xaxis = True
self.xaxis_label = ""
self.xaxis_tic_interval = 1
self.rotate_numbers = False
self.number_interval = 5
self.show_ends = False
self.annotate = None
self.show_fineprint = True
self.fineprint = "WebLogo " + __version__
self.show_boxes = False
self.shrink_fraction = 0.5
self.show_errorbars = True
self.errorbar_fraction = 0.90
self.errorbar_width_fraction = 0.25
self.errorbar_gray = 0.75
self.resolution = 96.0 # Dots per inch
self.default_color = Color.by_name("black")
self.color_scheme = None
# self.show_color_key = False # NOT yet implemented
self.debug = False
self.logo_margin = 2
self.stroke_width = 0.5
self.tic_length = 5
self.stack_width = std_sizes["medium"]
self.stack_aspect_ratio = 5
self.stack_margin = 0.5
self.pad_right = False
self.small_fontsize = 6
self.fontsize = 10
self.title_fontsize = 12
self.number_fontsize = 8
self.text_font = "ArialMT"
self.logo_font = "Arial-BoldMT"
self.title_font = "ArialMT"
self.first_index = 1
self.logo_start = None
self.logo_end = None
self.scale_width = True
self.reverse_stacks = True # If true, draw stacks with largest letters on top.
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
attributes = list(vars(self).keys())
attributes.sort()
return stdrepr(self, attributes)
# End class LogoOptions
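# --- Illustrative sketch added by the editor; not part of the original weblogo source. ---
# LogoOptions is a plain settings container: every attribute assigned in __init__ above can
# either be passed as a keyword argument or set after construction, as in this minimal example.
def _example_logo_options():
    opts = LogoOptions(logo_title="Example logo", unit_name="bits", stacks_per_line=60)
    opts.show_errorbars = False  # attributes may also be changed after construction
    opts.color_scheme = std_color_schemes["classic"]
    return opts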
class LogoFormat(LogoOptions):
"""Specifies the format of the logo. Requires LogoData and LogoOptions
objects.
>>> logodata = LogoData.from_seqs(seqs)
>>> logooptions = LogoOptions()
>>> logooptions.title = "A Logo Title"
>>> format = LogoFormat(logodata, logooptions)
Raises:
ArgumentError: if arguments are invalid.
"""
def __init__(self, logodata, logooptions=None):
"""Create a new LogoFormat instance."""
if logooptions is not None:
self.__dict__.update(logooptions.__dict__)
self.alphabet = logodata.alphabet
self.seqlen = logodata.length
# Derived parameters.
self.show_title = False
self.show_xaxis_label = False
self.yaxis_minor_tic_interval = None
self.lines_per_logo = None
self.char_width = None # Maximum character width. Stack width minus margins.
self.line_margin_left = None
self.line_margin_right = None
self.line_margin_bottom = None
self.line_margin_top = None
self.title_height = None
self.xaxis_label_height = None
self.line_height = None
self.line_width = None
self.logo_height = None
self.logo_width = None
self.creation_date = None
self.end_type = None
self.stack_height = self.stack_width * self.stack_aspect_ratio
# Attribute to test, test, error message
arg_conditions = (
("stacks_per_line", lambda x: x > 0, "Stacks per line must be positive."),
(
"stack_width",
lambda x: x > 0.0,
"Stack width must be greater than zero.",
),
(
"stack_aspect_ratio",
lambda x: x > 0,
"Stack aspect ratio must be greater than zero.",
),
("fontsize", lambda x: x > 0, "Font sizes must be positive."),
("small_fontsize", lambda x: x > 0, "Font sizes must be positive."),
("title_fontsize", lambda x: x > 0, "Font sizes must be positive."),
(
"errorbar_fraction",
lambda x: x >= 0.0 and x <= 1.0,
"The visible fraction of the error bar must be between zero and one.",
),
(
"yaxis_tic_interval",
lambda x: x >= 0.0,
"The yaxis tic interval cannot be negative.",
),
(
"yaxis_minor_tic_interval",
lambda x: not (x and x < 0.0),
"Distances cannot be negative.",
),
(
"xaxis_tic_interval",
lambda x: x > 0.0,
"Tic interval must be greater than zero.",
),
("number_interval", lambda x: x > 0.0, "Invalid interval between numbers."),
(
"shrink_fraction",
lambda x: x >= 0.0 and x <= 1.0,
"Invalid shrink fraction.",
),
("stack_margin", lambda x: x > 0.0, "Invalid stack margin."),
("logo_margin", lambda x: x > 0.0, "Invalid logo margin."),
("stroke_width", lambda x: x > 0.0, "Invalid stroke width."),
("tic_length", lambda x: x > 0.0, "Invalid tic length."),
)
# Run arguments tests. The second, attribute argument to the ArgumentError is
# used by the UI to provide user feedback.
# FIXME: More validation
for test in arg_conditions:
if not test[1](getattr(self, test[0])):
raise ArgumentError(test[2], test[0])
# Inclusive upper and lower bounds
# FIXME: Validate here. Move from eps_formatter
if self.logo_start is None:
self.logo_start = self.first_index
if self.logo_end is None:
self.logo_end = self.seqlen + self.first_index - 1
self.total_stacks = self.logo_end - self.logo_start + 1
if self.total_stacks <= 0:
raise ArgumentError("Logo must contain at least one stack", "logo_end")
if self.logo_start - self.first_index < 0:
raise ArgumentError(
"Logo range extends before start of available sequence.", "logo_range"
)
if self.logo_end - self.first_index >= self.seqlen:
raise ArgumentError(
"Logo range extends beyond end of available sequence.", "logo_range"
)
if self.logo_title:
self.show_title = True
if not self.fineprint:
self.show_fineprint = False
if self.xaxis_label:
self.show_xaxis_label = True
if self.yaxis_label is None:
self.yaxis_label = self.unit_name
if self.yaxis_label:
self.show_yaxis_label = True
else:
self.show_yaxis_label = False
self.show_ends = False
if not self.yaxis_scale:
conversion_factor = std_units[self.unit_name]
if conversion_factor:
if self.alphabet is None:
raise ArgumentError("Need an alphabet", "alphabet")
self.yaxis_scale = log(len(self.alphabet)) * conversion_factor
else:
self.yaxis_scale = 1.0 # probability units
if self.yaxis_scale <= 0.0:
raise ArgumentError(
"Invalid yaxis scale",
"yaxis_scale",
)
if self.yaxis_tic_interval >= self.yaxis_scale:
self.yaxis_tic_interval /= 2.0
self.yaxis_minor_tic_interval = (
float(self.yaxis_tic_interval) / self.yaxis_minor_tic_ratio
)
if self.color_scheme is None:
if self.alphabet in default_color_schemes:
self.color_scheme = default_color_schemes[self.alphabet]
else:
self.color_scheme = monochrome
self.lines_per_logo = 1 + ((self.total_stacks - 1) // self.stacks_per_line)
if self.lines_per_logo == 1 and not self.pad_right:
self.stacks_per_line = min(self.stacks_per_line, self.total_stacks)
self.char_width = self.stack_width - 2 * self.stack_margin
if self.show_yaxis:
self.line_margin_left = self.fontsize * 3.0
else:
if self.show_ends and self.show_xaxis:
self.line_margin_left = self.fontsize * 1.5
else:
self.line_margin_left = 4
if self.show_ends and self.show_xaxis:
self.line_margin_right = self.fontsize * 1.5
else:
self.line_margin_right = 4
if self.show_xaxis:
if self.rotate_numbers:
self.line_margin_bottom = self.number_fontsize * 2.5
else:
self.line_margin_bottom = self.number_fontsize * 1.5
else:
self.line_margin_bottom = 4
self.line_margin_top = 4
if self.show_title:
self.title_height = self.title_fontsize
else:
self.title_height = 0
self.xaxis_label_height = 0.0
if self.show_xaxis_label:
self.xaxis_label_height += self.fontsize
if self.show_fineprint:
if len(self.fineprint) != 0:
self.xaxis_label_height += self.small_fontsize
self.line_height = (
self.stack_height + self.line_margin_top + self.line_margin_bottom
)
self.line_width = (
self.stack_width * self.stacks_per_line
+ self.line_margin_left
+ self.line_margin_right
)
self.logo_height = int(
2 * self.logo_margin
+ self.title_height
+ self.xaxis_label_height
+ self.line_height * self.lines_per_logo
)
self.logo_width = int(2 * self.logo_margin + self.line_width)
self.creation_date = datetime.now().isoformat(" ")
end_type = "-"
end_types = {
unambiguous_protein_alphabet: "p",
unambiguous_rna_alphabet: "-",
unambiguous_dna_alphabet: "d",
}
if self.show_ends and self.alphabet in end_types:
end_type = end_types[self.alphabet]
self.end_type = end_type
if self.annotate is None:
self.annotate = []
for i in range(self.seqlen):
index = i + self.first_index
if index % self.number_interval == 0:
self.annotate.append("%d" % index)
else:
self.annotate.append("")
if len(self.annotate) != self.seqlen:
raise ArgumentError(
"Annotations must be same length as sequences.", "annotate"
)
# End __init__
# End class LogoFormat
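# --- Editor's simplified sketch of the layout arithmetic above; not part of the original source. ---
# With the default "medium" stack width (10.8 pt) and a stack_aspect_ratio of 5, a stack is
# 10.8 * 5 = 54 pt tall; the full computation in LogoFormat.__init__ additionally adds line
# margins, title and axis-label heights, and the 2 pt logo margins.
def _example_logo_geometry():
    stack_width = std_sizes["medium"]                 # 10.8 pt
    stack_aspect_ratio = 5
    stack_height = stack_width * stack_aspect_ratio   # 54.0 pt
    stacks_per_line = 40
    stack_area_width = stack_width * stacks_per_line  # 432.0 pt, before line margins
    return stack_height, stack_area_width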
def parse_prior(composition, alphabet, weight=None):
"""Parse a description of the expected monomer distribution of a sequence.
Valid compositions:
* None or 'none'
        No composition specified
* 'auto' or 'automatic'
Use the typical average distribution
for proteins and an equiprobable distribution for
everything else.
* 'equiprobable'
All monomers have the same probability.
* a percentage, e.g. '45%' or a fraction '0.45'
The fraction of CG bases for nucleotide alphabets
* a species name, e.g. '<NAME>i', 'H. sapiens',
Use the average CG percentage for the species's genome.
* An explicit distribution
e.g. {'A':10, 'C':40, 'G':40, 'T':10}
"""
if composition is None:
return None
comp = composition.strip()
if comp.lower() == "none":
return None
if weight is None and alphabet is not None:
weight = sqrt(float(len(alphabet)))
if weight < 0:
raise ValueError("Weight cannot be negative.")
if comp.lower() == "equiprobable":
prior = weight * equiprobable_distribution(len(alphabet))
elif comp.lower() == "auto" or comp.lower() == "automatic":
if alphabet == unambiguous_protein_alphabet:
prior = weight * asarray(aa_composition, float64)
else:
prior = weight * equiprobable_distribution(len(alphabet))
elif comp in std_percentCG:
prior = weight * base_distribution(std_percentCG[comp])
elif comp[-1] == "%":
prior = weight * base_distribution(float(comp[:-1]))
elif isfloat(comp):
prior = weight * base_distribution(float(comp) * 100.0)
elif composition[0] == "{" and composition[-1] == "}":
explicit = composition[1:-1]
explicit = (
explicit.replace(",", " ")
.replace("'", " ")
.replace('"', " ")
.replace(":", " ")
.split()
)
if len(explicit) != len(alphabet) * 2:
raise ValueError("Explicit prior does not match length of alphabet")
prior = -ones(len(alphabet), float64)
try:
for r in range(len(explicit) // 2):
letter = explicit[r * 2]
index = alphabet.ord(letter)
value = float(explicit[r * 2 + 1])
prior[index] = value
except ValueError:
raise ValueError("Cannot parse explicit composition")
if any(prior == -1.0):
raise ValueError(
"Explicit prior does not match alphabet"
) # pragma: no cover
prior /= sum(prior)
prior *= weight
else:
raise ValueError("Unknown or malformed composition: %s" % composition)
if len(prior) != len(alphabet):
raise ValueError(
"The sequence alphabet and composition are incompatible."
) # pragma: no cover
return prior
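# --- Illustrative examples added by the editor; not part of the original weblogo source. ---
# parse_prior() accepts the composition descriptions documented above. For the unambiguous
# DNA alphabet the default weight is sqrt(len(alphabet)) = 2.0, so each prior sums to 2.0.
def _example_parse_prior():
    p1 = parse_prior("equiprobable", unambiguous_dna_alphabet)  # 2.0 * [0.25, 0.25, 0.25, 0.25]
    p2 = parse_prior("40%", unambiguous_dna_alphabet)           # 2.0 * base_distribution(40.0)
    p3 = parse_prior("{'A':10, 'C':40, 'G':40, 'T':10}", unambiguous_dna_alphabet)
    return p1, p2, p3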
def base_distribution(percentCG):
A = (1.0 - (percentCG / 100.0)) / 2.0
C = (percentCG / 100.0) / 2.0
G = (percentCG / 100.0) / 2.0
T = (1.0 - (percentCG / 100)) / 2.0
return asarray((A, C, G, T), float64)
def equiprobable_distribution(length: int) -> np.ndarray:
return ones((length), float64) / length
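# --- Editor's worked example (illustrative; not part of the original source). ---
# base_distribution() turns a CG percentage into an (A, C, G, T) frequency vector: the
# E. coli value of 50.5 %CG from std_percentCG gives (0.2475, 0.2525, 0.2525, 0.2475),
# while equiprobable_distribution(20) is simply twenty entries of 0.05.
def _example_background_distributions():
    ecoli_bg = base_distribution(std_percentCG["E. coli"])
    protein_bg = equiprobable_distribution(len(unambiguous_protein_alphabet))
    return ecoli_bg, protein_bg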
def _seq_formats():
"""Return a dictionary mapping between the names of formats for the sequence data
    and the corresponding parsers.
"""
# Add position weight matrix formats to input parsers by hand
fin_choices = dict(seq_io.format_names())
fin_choices["transfac"] = "transfac"
del fin_choices["plain"]
return fin_choices
def _seq_names():
"""Returns a list of the names of accepted sequence data formats."""
fin_names = [f.names[0] for f in seq_io.formats]
fin_names.remove("plain")
fin_names.append("transfac")
return fin_names
def read_seq_data(
fin,
input_parser=seq_io.read,
alphabet=None,
ignore_lower_case=False,
max_file_size=0,
):
"""Read sequence data from the input stream and return a seqs object.
    The environment variable WEBLOGO_MAX_FILE_SIZE overrides the max_file_size argument.
Used to limit the load on the WebLogo webserver.
"""
max_file_size = int(os.environ.get("WEBLOGO_MAX_FILE_SIZE", max_file_size))
# If max_file_size is set, or if fin==stdin (which is non-seekable), we
# read the data and replace fin with a StringIO object.
if max_file_size > 0:
data = fin.read(max_file_size)
more_data = fin.read(2)
if more_data != "":
raise IOError("File exceeds maximum allowed size: %d bytes" % max_file_size)
fin = StringIO(data)
elif fin == sys.stdin:
fin = StringIO(fin.read())
fin.seek(0)
seqs = input_parser(fin)
if seqs is None or len(seqs) == 0:
raise ValueError("Please provide a multiple sequence alignment")
if ignore_lower_case:
# Case is significant. Do not count lower case letters.
for i, s in enumerate(seqs):
seqs[i] = s.mask()
# Add alphabet to seqs.
if alphabet:
seqs.alphabet = Alphabet(alphabet)
else:
seqs.alphabet = Alphabet.which(seqs)
return seqs
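# --- Illustrative sketch added by the editor; not part of the original weblogo source. ---
# read_seq_data() accepts any readable text stream; the three-sequence FASTA alignment below
# is a made-up example, and seq_io.read is assumed to detect the FASTA format automatically.
def _example_read_seq_data():
    from io import StringIO
    fasta = StringIO(">s1\nACGTACGT\n>s2\nACGAACGT\n>s3\nACGTACGA\n")
    return read_seq_data(fasta, alphabet=unambiguous_dna_alphabet)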
class LogoData(object):
"""The data needed to generate a sequence logo.
Args:
alphabet: The set of symbols to count.
See also --sequence-type, --ignore-lower-case
length: All sequences must be the same length, else WebLogo will return an error
counts: An array of character counts
entropy: The relative entropy of each column
entropy_interval: entropy confidence interval
"""
def __init__(
self,
length=None,
alphabet=None,
counts=None,
entropy=None,
entropy_interval=None,
weight=None,
):
"""Creates a new LogoData object"""
self.length = length
self.alphabet = alphabet
self.counts = counts
self.entropy = entropy
self.entropy_interval = entropy_interval
self.weight = weight
@classmethod
def from_counts(
cls, alphabet: Alphabet, counts: np.ndarray, prior: np.ndarray = None
) -> "LogoData":
"""Build a LogoData object from counts."""
# Counts is a Motif object?
# counts = counts.array
seq_length, A = counts.shape
if prior is not None:
prior = array(prior, float64)
if prior is None or sum(prior) == 0.0:
R = log(A)
ent = zeros(seq_length, float64)
entropy_interval = None
for i in range(0, seq_length):
C = sum(counts[i])
# FIXME: fixup .moremath.entropy()?
if C == 0:
ent[i] = 0.0
else:
ent[i] = R - entropy(counts[i])
else:
ent = zeros(seq_length, float64)
entropy_interval = zeros((seq_length, 2), float64)
R = log(A)
for i in range(0, seq_length):
alpha = array(counts[i], float64)
alpha += prior
posterior = Dirichlet(alpha)
ent[i] = posterior.mean_relative_entropy(prior / sum(prior))
(
entropy_interval[i][0],
entropy_interval[i][1],
) = posterior.interval_relative_entropy(prior / sum(prior), 0.95)
weight = array(np.sum(counts, axis=1), float)
max_weight = max(weight)
if max_weight == 0.0:
raise ValueError("No counts.")
weight /= max_weight
return cls(seq_length, alphabet, counts, ent, entropy_interval, weight)
@classmethod
def from_seqs(cls, seqs: SeqList, prior: np.ndarray = None) -> "LogoData":
"""Build a LogoData object from a SeqList, a list of sequences."""
# --- VALIDATE DATA ---
# check that at least one sequence of length at least 1 long
if len(seqs) == 0 or len(seqs[0]) == 0:
raise ValueError("No sequence data found.")
# Check sequence lengths
seq_length = len(seqs[0])
for i, s in enumerate(seqs):
# print(i, s, len(s))
# TODO: Redundant? Should be checked in SeqList?
if seq_length != len(s):
raise ArgumentError(
"Sequence number %d differs in length from the previous sequences"
% (i + 1),
"sequences",
)
# FIXME: Check seqs.alphabet?
counts = seqs.profile()
return cls.from_counts(seqs.alphabet, counts, prior)
def __str__(self) -> str:
out = StringIO()
print("## LogoData", file=out)
print("# First column is position number, counting from zero", file=out)
print("# Subsequent columns are raw symbol counts", file=out)
print("# Entropy is mean entropy measured in nats.", file=out)
print("# Low and High are the 95% confidence limits.", file=out)
print("# Weight is the fraction of non-gap symbols in the column.", file=out)
print("#\t", file=out)
# Show column names
print("#", end="\t", file=out)
for a in self.alphabet:
print(a, end=" \t", file=out)
print("Entropy\tLow\tHigh\tWeight", file=out)
# Write the data table
for i in range(self.length):
print(i + 1, end=" \t", file=out)
for c in self.counts[i]:
print(c, end=" \t", file=out)
print("%6.4f" % self.entropy[i], end=" \t", file=out)
if self.entropy_interval is not None:
print("%6.4f" % self.entropy_interval[i][0], end=" \t", file=out)
print("%6.4f" % self.entropy_interval[i][1], end=" \t", file=out)
else:
print("\t", "\t", end="", file=out)
if self.weight is not None:
print("%6.4f" % self.weight[i], end="", file=out)
print("", file=out)
print("# End LogoData", file=out)
return out.getvalue()
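# --- Illustrative end-to-end sketch added by the editor; not part of the original source. ---
# Typical flow: read an alignment, build LogoData from it, then wrap data and options in a
# LogoFormat. Rendering is handled elsewhere (e.g. the formatters referenced in the
# commented-out import near the top of this module); the FASTA text below is a made-up example.
def _example_logodata_pipeline():
    from io import StringIO
    seqs = read_seq_data(StringIO(">s1\nACGTACGT\n>s2\nACGAACGT\n>s3\nACGTACGA\n"))
    data = LogoData.from_seqs(seqs)
    options = LogoOptions(logo_title="Example logo")
    return LogoFormat(data, options)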
def _from_URL_fileopen(target_url: str) -> TextIO: # pragma: no cover
"""opens files from a remote URL location"""
# parsing url in component parts
(scheme, net_location, path, param, query, frag) = urlparse(target_url)
# checks if string is URL link
if scheme != "http" and scheme != "https" and scheme != "ftp":
raise ValueError("Cannot open url: %s", target_url)
# checks for dropbox link
if net_location == "www.dropbox.com":
# changes dropbox http link into download link
if query == "dl=0":
query2 = "dl=1"
# rebuild download URL, with new query2 variable
target_url = urlunparse((scheme, net_location, path, param, query2, ""))
# checks for google drive link
if net_location == "drive.google.com":
# link configuration for direct download instead of html frame
google_directdl_frag = "https://docs.google.com/uc?export=download&id="
# pull file id
(scheme, net_location, path_raw, param, query, frag) = urlparse(target_url)
id_file = path_raw.split("/")[3]
# rebuild URL for direct download
target_url = google_directdl_frag + id_file
# save url to temporary file
req = Request(target_url)
res = urlopen(req)
return StringIO(str(res.read()))
| # -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 <NAME>
# Copyright (c) 2006-2015, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import os
import sys
from datetime import datetime
from io import StringIO
from math import log, sqrt
from typing.io import TextIO
from urllib.parse import urlparse, urlunparse
from urllib.request import Request, urlopen
import numpy as np
# Avoid 'from numpy import *' since numpy has lots of names defined
from numpy import any, array, asarray, float64, ones, zeros
from scipy.stats import entropy
from . import __version__, seq_io
from .color import Color
from .colorscheme import (
ColorScheme,
SymbolColor,
base_pairing,
charge,
chemistry,
hydrophobicity,
monochrome,
)
from .data import amino_acid_composition
from .logomath import Dirichlet
from .seq import (
Alphabet,
SeqList,
unambiguous_dna_alphabet,
unambiguous_protein_alphabet,
unambiguous_rna_alphabet,
)
from .utils import ArgumentError, isfloat, stdrepr
# Shorten development version string of the form weblogo-3.6.1.dev43+g64d9f12.d20190304
if __version__.find("+") != -1:
__version__ = __version__[: __version__.find("+")]
# from .logo_formatter import (GhostscriptAPI, pdf_formatter, jpeg_formatter, png_formatter,
# png_print_formatter,
# txt_formatter, eps_formatter, formatters, default_formatter)
# ------ META DATA ------
# __all__ = ['LogoOptions',
# 'description',
# '__version__',
# 'LogoFormat',
# 'LogoData',
# 'GhostscriptAPI',
# 'std_color_schemes',
# 'default_color_schemes',
# 'classic',
# 'std_units',
# 'std_sizes',
# 'std_alphabets',
# 'std_percentCG',
# 'pdf_formatter',
# 'jpeg_formatter',
# 'png_formatter',
# 'png_print_formatter',
# 'txt_formatter',
# 'eps_formatter',
# 'formatters',
# 'default_formatter',
# 'base_distribution',
# 'equiprobable_distribution',
# 'read_seq_data',
# 'Color',
# 'ColorScheme',
# 'parse_prior',
# 'release_description',
# 'description'
# ]
description = "Create sequence logos from biological sequence alignments."
release_description = "WebLogo %s" % (__version__)
def cgi(htdocs_directory): # pragma: no cover
import weblogo._cgi
weblogo._cgi.main(htdocs_directory)
aa_composition = [amino_acid_composition[_k] for _k in unambiguous_protein_alphabet]
# ------ DATA ------
classic = ColorScheme(
[
SymbolColor("G", "orange"),
SymbolColor("TU", "red"),
SymbolColor("C", "blue"),
SymbolColor("A", "green"),
]
)
std_color_schemes = {
"auto": None, # Depends on sequence type
"monochrome": monochrome,
"base pairing": base_pairing,
"classic": classic,
"hydrophobicity": hydrophobicity,
"chemistry": chemistry,
"charge": charge,
}
default_color_schemes = {
unambiguous_protein_alphabet: hydrophobicity,
unambiguous_rna_alphabet: base_pairing,
unambiguous_dna_alphabet: base_pairing,
}
std_units = {
"bits": 1.0 / log(2),
"nats": 1.0,
"digits": 1.0 / log(10),
"kT": 1.0,
"kJ/mol": 8.314472 * 298.15 / 1000.0,
"kcal/mol": 1.987 * 298.15 / 1000.0,
"probability": None,
}
# The base stack width is set equal to 9pt Courier.
# (Courier has a width equal to 3/5 of the point size.)
# Check that can get 80 characters in journal page @small
# 40 characters in a journal column
std_sizes = {"small": 5.4, "medium": 5.4 * 2, "large": 5.4 * 3}
std_alphabets = {
"protein": unambiguous_protein_alphabet,
"rna": unambiguous_rna_alphabet,
"dna": unambiguous_dna_alphabet,
}
std_percentCG = {
"H. sapiens": 40.0,
"E. coli": 50.5,
"S. cerevisiae": 38.0,
"C. elegans": 36.0,
"D. melanogaster": 43.0,
"M. musculus": 42.0,
"T. thermophilus": 69.4,
}
# Thermus thermophilus: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>: The genome sequence of the extreme
# thermophile Thermus thermophilus.
# Nat Biotechnol 2004, 22:547-53
class LogoOptions(object):
"""A container for all logo formatting options. Not all of these
are directly accessible through the CLI or web interfaces.
To display LogoOption defaults::
>>> from weblogo import *
>>> LogoOptions()
All physical lengths are measured in points. (72 points per inch, 28.3 points per cm)
Args:
creator_text: Embedded as comment in figures.
logo_title: Creates title for the sequence logo
logo_label: An optional figure label, added to the top left (e.g. '(a)').
unit_name: See std_units for options. (Default 'bits')
yaxis_label: Defaults to unit_name
xaxis_label: Add a label to the x-axis, or hide x-axis altogether.
fineprint: Defaults to WebLogo name and version
show_yaxis: Display entropy scale along y-axis (default: True)
show_xaxis: Display sequence numbers along x-axis (default: True)
show_ends: Display label at the ends of the sequence (default: False)
show_fineprint: Toggle display of the WebLogo version information in the lower
right corner. Optional, but we appreciate the acknowledgment.
        show_errorbars: Draw errorbars (default: True)
        show_boxes: Draw boxes around stack characters (default: False)
        debug: Draw extra graphics debugging information.
        rotate_numbers: Draw xaxis numbers with vertical orientation?
        scale_width: boolean, scale the width of characters in proportion to the
            fraction of non-gap symbols in the column
pad_right: Make a single line logo the same width as multiline logos
(default: False)
stacks_per_line: Maximum number of logo stacks per logo line. (Default: 40)
        yaxis_tic_interval: Distance between ticmarks on the y-axis (default: 1.0)
        yaxis_minor_tic_ratio: Ratio of the major tic interval to the minor tic
            interval (default: 5)
        yaxis_scale: Sets height of the y-axis in designated units
        xaxis_tic_interval: Distance between ticmarks on the x-axis (default: 1.0)
        number_interval: Distance between position-number labels on the x-axis (default: 5)
shrink_fraction: Proportional shrinkage of characters if show_boxes is true.
        errorbar_fraction: Visible fraction of the error bar (default: 0.90)
        errorbar_width_fraction: Relative width of the error bar (default: 0.25)
errorbar_gray: Sets error bars' gray scale percentage (default .75)
resolution: Dots per inch (default: 96). Used for bitmapped output
formats
default_color: Symbol color if not otherwise specified
color_scheme: A custom color scheme can be specified using CSS2 (Cascading
Style Sheet) syntax.
E.g. 'red', '#F00', '#FF0000', 'rgb(255, 0, 0)',
'rgb(100%, 0%, 0%)' or 'hsl(0, 100%, 50%)' for the color red.
stack_width: Scale the visible stack width by the fraction of symbols in
            the column (i.e. columns with many gaps or unknowns are
narrow.) (Default: yes)
stack_aspect_ratio: Ratio of stack height to width (default: 5)
logo_margin: Default: 2 pts
stroke_width: Default: 0.5 pts
tic_length: Default: 5 pts
stack_margin: Default: 0.5 pts
small_fontsize: Small text font size in points
fontsize: Regular text font size in points
title_fontsize: Title text font size in points
number_fontsize: Font size for axis-numbers, in points.
text_font: Select font for labels
logo_font: Select font for Logo
title_font: Select font for Logo's title
first_index: Index of first position in sequence data
logo_start: Lower bound of sequence to display
logo_end: Upper bound of sequence to display
"""
def __init__(self, **kwargs):
"""Create a new LogoOptions instance.
>>> logooptions = LogoOptions(logo_title = "Some Title String")
>>> logooptions.show_yaxis = False
>>> repr(logooptions)
"""
self.alphabet = None
self.creator_text = release_description
self.logo_title = ""
self.logo_label = ""
self.stacks_per_line = 40
self.unit_name = "bits"
self.show_yaxis = True
        # yaxis_label default depends on other settings. See LogoFormat
self.yaxis_label = None
self.yaxis_tic_interval = 1.0
self.yaxis_minor_tic_ratio = 5
self.yaxis_scale = None
self.show_xaxis = True
self.xaxis_label = ""
self.xaxis_tic_interval = 1
self.rotate_numbers = False
self.number_interval = 5
self.show_ends = False
self.annotate = None
self.show_fineprint = True
self.fineprint = "WebLogo " + __version__
self.show_boxes = False
self.shrink_fraction = 0.5
self.show_errorbars = True
self.errorbar_fraction = 0.90
self.errorbar_width_fraction = 0.25
self.errorbar_gray = 0.75
self.resolution = 96.0 # Dots per inch
self.default_color = Color.by_name("black")
self.color_scheme = None
# self.show_color_key = False # NOT yet implemented
self.debug = False
self.logo_margin = 2
self.stroke_width = 0.5
self.tic_length = 5
self.stack_width = std_sizes["medium"]
self.stack_aspect_ratio = 5
self.stack_margin = 0.5
self.pad_right = False
self.small_fontsize = 6
self.fontsize = 10
self.title_fontsize = 12
self.number_fontsize = 8
self.text_font = "ArialMT"
self.logo_font = "Arial-BoldMT"
self.title_font = "ArialMT"
self.first_index = 1
self.logo_start = None
self.logo_end = None
self.scale_width = True
self.reverse_stacks = True # If true, draw stacks with largest letters on top.
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
attributes = list(vars(self).keys())
attributes.sort()
return stdrepr(self, attributes)
# End class LogoOptions
class LogoFormat(LogoOptions):
"""Specifies the format of the logo. Requires LogoData and LogoOptions
objects.
>>> logodata = LogoData.from_seqs(seqs)
>>> logooptions = LogoOptions()
>>> logooptions.title = "A Logo Title"
>>> format = LogoFormat(logodata, logooptions)
Raises:
ArgumentError: if arguments are invalid.
"""
def __init__(self, logodata, logooptions=None):
"""Create a new LogoFormat instance."""
if logooptions is not None:
self.__dict__.update(logooptions.__dict__)
self.alphabet = logodata.alphabet
self.seqlen = logodata.length
# Derived parameters.
self.show_title = False
self.show_xaxis_label = False
self.yaxis_minor_tic_interval = None
self.lines_per_logo = None
self.char_width = None # Maximum character width. Stack width minus margins.
self.line_margin_left = None
self.line_margin_right = None
self.line_margin_bottom = None
self.line_margin_top = None
self.title_height = None
self.xaxis_label_height = None
self.line_height = None
self.line_width = None
self.logo_height = None
self.logo_width = None
self.creation_date = None
self.end_type = None
self.stack_height = self.stack_width * self.stack_aspect_ratio
# Attribute to test, test, error message
arg_conditions = (
("stacks_per_line", lambda x: x > 0, "Stacks per line must be positive."),
(
"stack_width",
lambda x: x > 0.0,
"Stack width must be greater than zero.",
),
(
"stack_aspect_ratio",
lambda x: x > 0,
"Stack aspect ratio must be greater than zero.",
),
("fontsize", lambda x: x > 0, "Font sizes must be positive."),
("small_fontsize", lambda x: x > 0, "Font sizes must be positive."),
("title_fontsize", lambda x: x > 0, "Font sizes must be positive."),
(
"errorbar_fraction",
lambda x: x >= 0.0 and x <= 1.0,
"The visible fraction of the error bar must be between zero and one.",
),
(
"yaxis_tic_interval",
lambda x: x >= 0.0,
"The yaxis tic interval cannot be negative.",
),
(
"yaxis_minor_tic_interval",
lambda x: not (x and x < 0.0),
"Distances cannot be negative.",
),
(
"xaxis_tic_interval",
lambda x: x > 0.0,
"Tic interval must be greater than zero.",
),
("number_interval", lambda x: x > 0.0, "Invalid interval between numbers."),
(
"shrink_fraction",
lambda x: x >= 0.0 and x <= 1.0,
"Invalid shrink fraction.",
),
("stack_margin", lambda x: x > 0.0, "Invalid stack margin."),
("logo_margin", lambda x: x > 0.0, "Invalid logo margin."),
("stroke_width", lambda x: x > 0.0, "Invalid stroke width."),
("tic_length", lambda x: x > 0.0, "Invalid tic length."),
)
# Run arguments tests. The second, attribute argument to the ArgumentError is
# used by the UI to provide user feedback.
# FIXME: More validation
for test in arg_conditions:
if not test[1](getattr(self, test[0])):
raise ArgumentError(test[2], test[0])
# Inclusive upper and lower bounds
# FIXME: Validate here. Move from eps_formatter
if self.logo_start is None:
self.logo_start = self.first_index
if self.logo_end is None:
self.logo_end = self.seqlen + self.first_index - 1
self.total_stacks = self.logo_end - self.logo_start + 1
if self.total_stacks <= 0:
raise ArgumentError("Logo must contain at least one stack", "logo_end")
if self.logo_start - self.first_index < 0:
raise ArgumentError(
"Logo range extends before start of available sequence.", "logo_range"
)
if self.logo_end - self.first_index >= self.seqlen:
raise ArgumentError(
"Logo range extends beyond end of available sequence.", "logo_range"
)
if self.logo_title:
self.show_title = True
if not self.fineprint:
self.show_fineprint = False
if self.xaxis_label:
self.show_xaxis_label = True
if self.yaxis_label is None:
self.yaxis_label = self.unit_name
if self.yaxis_label:
self.show_yaxis_label = True
else:
self.show_yaxis_label = False
self.show_ends = False
if not self.yaxis_scale:
conversion_factor = std_units[self.unit_name]
if conversion_factor:
if self.alphabet is None:
raise ArgumentError("Need an alphabet", "alphabet")
self.yaxis_scale = log(len(self.alphabet)) * conversion_factor
else:
self.yaxis_scale = 1.0 # probability units
if self.yaxis_scale <= 0.0:
raise ArgumentError(
"Invalid yaxis scale",
"yaxis_scale",
)
if self.yaxis_tic_interval >= self.yaxis_scale:
self.yaxis_tic_interval /= 2.0
self.yaxis_minor_tic_interval = (
float(self.yaxis_tic_interval) / self.yaxis_minor_tic_ratio
)
if self.color_scheme is None:
if self.alphabet in default_color_schemes:
self.color_scheme = default_color_schemes[self.alphabet]
else:
self.color_scheme = monochrome
self.lines_per_logo = 1 + ((self.total_stacks - 1) // self.stacks_per_line)
if self.lines_per_logo == 1 and not self.pad_right:
self.stacks_per_line = min(self.stacks_per_line, self.total_stacks)
self.char_width = self.stack_width - 2 * self.stack_margin
if self.show_yaxis:
self.line_margin_left = self.fontsize * 3.0
else:
if self.show_ends and self.show_xaxis:
self.line_margin_left = self.fontsize * 1.5
else:
self.line_margin_left = 4
if self.show_ends and self.show_xaxis:
self.line_margin_right = self.fontsize * 1.5
else:
self.line_margin_right = 4
if self.show_xaxis:
if self.rotate_numbers:
self.line_margin_bottom = self.number_fontsize * 2.5
else:
self.line_margin_bottom = self.number_fontsize * 1.5
else:
self.line_margin_bottom = 4
self.line_margin_top = 4
if self.show_title:
self.title_height = self.title_fontsize
else:
self.title_height = 0
self.xaxis_label_height = 0.0
if self.show_xaxis_label:
self.xaxis_label_height += self.fontsize
if self.show_fineprint:
if len(self.fineprint) != 0:
self.xaxis_label_height += self.small_fontsize
self.line_height = (
self.stack_height + self.line_margin_top + self.line_margin_bottom
)
self.line_width = (
self.stack_width * self.stacks_per_line
+ self.line_margin_left
+ self.line_margin_right
)
self.logo_height = int(
2 * self.logo_margin
+ self.title_height
+ self.xaxis_label_height
+ self.line_height * self.lines_per_logo
)
self.logo_width = int(2 * self.logo_margin + self.line_width)
self.creation_date = datetime.now().isoformat(" ")
end_type = "-"
end_types = {
unambiguous_protein_alphabet: "p",
unambiguous_rna_alphabet: "-",
unambiguous_dna_alphabet: "d",
}
if self.show_ends and self.alphabet in end_types:
end_type = end_types[self.alphabet]
self.end_type = end_type
if self.annotate is None:
self.annotate = []
for i in range(self.seqlen):
index = i + self.first_index
if index % self.number_interval == 0:
self.annotate.append("%d" % index)
else:
self.annotate.append("")
if len(self.annotate) != self.seqlen:
raise ArgumentError(
"Annotations must be same length as sequences.", "annotate"
)
# End __init__
# End class LogoFormat
def parse_prior(composition, alphabet, weight=None):
"""Parse a description of the expected monomer distribution of a sequence.
Valid compositions:
* None or 'none'
        No composition specified
* 'auto' or 'automatic'
Use the typical average distribution
for proteins and an equiprobable distribution for
everything else.
* 'equiprobable'
All monomers have the same probability.
* a percentage, e.g. '45%' or a fraction '0.45'
The fraction of CG bases for nucleotide alphabets
* a species name, e.g. '<NAME>i', 'H. sapiens',
Use the average CG percentage for the species's genome.
* An explicit distribution
e.g. {'A':10, 'C':40, 'G':40, 'T':10}
"""
if composition is None:
return None
comp = composition.strip()
if comp.lower() == "none":
return None
if weight is None and alphabet is not None:
weight = sqrt(float(len(alphabet)))
if weight < 0:
raise ValueError("Weight cannot be negative.")
if comp.lower() == "equiprobable":
prior = weight * equiprobable_distribution(len(alphabet))
elif comp.lower() == "auto" or comp.lower() == "automatic":
if alphabet == unambiguous_protein_alphabet:
prior = weight * asarray(aa_composition, float64)
else:
prior = weight * equiprobable_distribution(len(alphabet))
elif comp in std_percentCG:
prior = weight * base_distribution(std_percentCG[comp])
elif comp[-1] == "%":
prior = weight * base_distribution(float(comp[:-1]))
elif isfloat(comp):
prior = weight * base_distribution(float(comp) * 100.0)
elif composition[0] == "{" and composition[-1] == "}":
explicit = composition[1:-1]
explicit = (
explicit.replace(",", " ")
.replace("'", " ")
.replace('"', " ")
.replace(":", " ")
.split()
)
if len(explicit) != len(alphabet) * 2:
raise ValueError("Explicit prior does not match length of alphabet")
prior = -ones(len(alphabet), float64)
try:
for r in range(len(explicit) // 2):
letter = explicit[r * 2]
index = alphabet.ord(letter)
value = float(explicit[r * 2 + 1])
prior[index] = value
except ValueError:
raise ValueError("Cannot parse explicit composition")
if any(prior == -1.0):
raise ValueError(
"Explicit prior does not match alphabet"
) # pragma: no cover
prior /= sum(prior)
prior *= weight
else:
raise ValueError("Unknown or malformed composition: %s" % composition)
if len(prior) != len(alphabet):
raise ValueError(
"The sequence alphabet and composition are incompatible."
) # pragma: no cover
return prior
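# Illustrative usage sketch (not part of the original module): a few example
# calls to parse_prior() mirroring the docstring above. The returned priors are
# scaled by the default weight, sqrt(len(alphabet)), so only the call patterns
# and the None results are asserted here.
def _parse_prior_examples():  # pragma: no cover
    assert parse_prior(None, unambiguous_dna_alphabet) is None
    assert parse_prior("none", unambiguous_dna_alphabet) is None
    equi = parse_prior("equiprobable", unambiguous_dna_alphabet)
    cg45 = parse_prior("45%", unambiguous_dna_alphabet)
    explicit = parse_prior(
        "{'A':10, 'C':40, 'G':40, 'T':10}", unambiguous_dna_alphabet
    )
    return equi, cg45, explicit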
def base_distribution(percentCG):
A = (1.0 - (percentCG / 100.0)) / 2.0
C = (percentCG / 100.0) / 2.0
G = (percentCG / 100.0) / 2.0
T = (1.0 - (percentCG / 100)) / 2.0
return asarray((A, C, G, T), float64)
def equiprobable_distribution(length: int) -> np.ndarray:
return ones((length), float64) / length
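# Worked example (illustrative, not part of the original module): for a genome
# with 40% C+G, A and T each get (1 - 0.40) / 2 = 0.30 while C and G each get
# 0.40 / 2 = 0.20; the equiprobable distribution over 4 symbols is 0.25 each.
def _distribution_examples():  # pragma: no cover
    cg40 = base_distribution(40.0)  # approximately [0.30, 0.20, 0.20, 0.30]
    flat = equiprobable_distribution(4)  # [0.25, 0.25, 0.25, 0.25]
    return cg40, flat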
def _seq_formats():
"""Return a dictionary mapping between the names of formats for the sequence data
    and the corresponding parsers.
"""
# Add position weight matrix formats to input parsers by hand
fin_choices = dict(seq_io.format_names())
fin_choices["transfac"] = "transfac"
del fin_choices["plain"]
return fin_choices
def _seq_names():
"""Returns a list of the names of accepted sequence data formats."""
fin_names = [f.names[0] for f in seq_io.formats]
fin_names.remove("plain")
fin_names.append("transfac")
return fin_names
def read_seq_data(
fin,
input_parser=seq_io.read,
alphabet=None,
ignore_lower_case=False,
max_file_size=0,
):
"""Read sequence data from the input stream and return a seqs object.
    The environment variable WEBLOGO_MAX_FILE_SIZE overrides the max_file_size argument.
Used to limit the load on the WebLogo webserver.
"""
max_file_size = int(os.environ.get("WEBLOGO_MAX_FILE_SIZE", max_file_size))
# If max_file_size is set, or if fin==stdin (which is non-seekable), we
# read the data and replace fin with a StringIO object.
if max_file_size > 0:
data = fin.read(max_file_size)
more_data = fin.read(2)
if more_data != "":
raise IOError("File exceeds maximum allowed size: %d bytes" % max_file_size)
fin = StringIO(data)
elif fin == sys.stdin:
fin = StringIO(fin.read())
fin.seek(0)
seqs = input_parser(fin)
if seqs is None or len(seqs) == 0:
raise ValueError("Please provide a multiple sequence alignment")
if ignore_lower_case:
# Case is significant. Do not count lower case letters.
for i, s in enumerate(seqs):
seqs[i] = s.mask()
# Add alphabet to seqs.
if alphabet:
seqs.alphabet = Alphabet(alphabet)
else:
seqs.alphabet = Alphabet.which(seqs)
return seqs
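# Illustrative usage sketch (assumes a FASTA file named "alignment.fasta"; not
# part of the original module): read an alignment with the default parser and
# let the alphabet be guessed from the data.
def _read_seq_data_example(path="alignment.fasta"):  # pragma: no cover
    with open(path) as fin:
        return read_seq_data(fin)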
class LogoData(object):
"""The data needed to generate a sequence logo.
Args:
alphabet: The set of symbols to count.
See also --sequence-type, --ignore-lower-case
length: All sequences must be the same length, else WebLogo will return an error
counts: An array of character counts
entropy: The relative entropy of each column
entropy_interval: entropy confidence interval
"""
def __init__(
self,
length=None,
alphabet=None,
counts=None,
entropy=None,
entropy_interval=None,
weight=None,
):
"""Creates a new LogoData object"""
self.length = length
self.alphabet = alphabet
self.counts = counts
self.entropy = entropy
self.entropy_interval = entropy_interval
self.weight = weight
@classmethod
def from_counts(
cls, alphabet: Alphabet, counts: np.ndarray, prior: np.ndarray = None
) -> "LogoData":
"""Build a LogoData object from counts."""
# Counts is a Motif object?
# counts = counts.array
seq_length, A = counts.shape
if prior is not None:
prior = array(prior, float64)
if prior is None or sum(prior) == 0.0:
R = log(A)
ent = zeros(seq_length, float64)
entropy_interval = None
for i in range(0, seq_length):
C = sum(counts[i])
# FIXME: fixup .moremath.entropy()?
if C == 0:
ent[i] = 0.0
else:
ent[i] = R - entropy(counts[i])
else:
ent = zeros(seq_length, float64)
entropy_interval = zeros((seq_length, 2), float64)
R = log(A)
for i in range(0, seq_length):
alpha = array(counts[i], float64)
alpha += prior
posterior = Dirichlet(alpha)
ent[i] = posterior.mean_relative_entropy(prior / sum(prior))
(
entropy_interval[i][0],
entropy_interval[i][1],
) = posterior.interval_relative_entropy(prior / sum(prior), 0.95)
weight = array(np.sum(counts, axis=1), float)
max_weight = max(weight)
if max_weight == 0.0:
raise ValueError("No counts.")
weight /= max_weight
return cls(seq_length, alphabet, counts, ent, entropy_interval, weight)
@classmethod
def from_seqs(cls, seqs: SeqList, prior: np.ndarray = None) -> "LogoData":
"""Build a LogoData object from a SeqList, a list of sequences."""
# --- VALIDATE DATA ---
# check that at least one sequence of length at least 1 long
if len(seqs) == 0 or len(seqs[0]) == 0:
raise ValueError("No sequence data found.")
# Check sequence lengths
seq_length = len(seqs[0])
for i, s in enumerate(seqs):
# print(i, s, len(s))
# TODO: Redundant? Should be checked in SeqList?
if seq_length != len(s):
raise ArgumentError(
"Sequence number %d differs in length from the previous sequences"
% (i + 1),
"sequences",
)
# FIXME: Check seqs.alphabet?
counts = seqs.profile()
return cls.from_counts(seqs.alphabet, counts, prior)
def __str__(self) -> str:
out = StringIO()
print("## LogoData", file=out)
print("# First column is position number, counting from zero", file=out)
print("# Subsequent columns are raw symbol counts", file=out)
print("# Entropy is mean entropy measured in nats.", file=out)
print("# Low and High are the 95% confidence limits.", file=out)
print("# Weight is the fraction of non-gap symbols in the column.", file=out)
print("#\t", file=out)
# Show column names
print("#", end="\t", file=out)
for a in self.alphabet:
print(a, end=" \t", file=out)
print("Entropy\tLow\tHigh\tWeight", file=out)
# Write the data table
for i in range(self.length):
print(i + 1, end=" \t", file=out)
for c in self.counts[i]:
print(c, end=" \t", file=out)
print("%6.4f" % self.entropy[i], end=" \t", file=out)
if self.entropy_interval is not None:
print("%6.4f" % self.entropy_interval[i][0], end=" \t", file=out)
print("%6.4f" % self.entropy_interval[i][1], end=" \t", file=out)
else:
print("\t", "\t", end="", file=out)
if self.weight is not None:
print("%6.4f" % self.weight[i], end="", file=out)
print("", file=out)
print("# End LogoData", file=out)
return out.getvalue()
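# Illustrative sketch (not part of the original module): build LogoData from a
# SeqList with an automatic prior, mirroring the from_seqs() docstring. The
# seqs argument is assumed to come from read_seq_data().
def _logodata_example(seqs):  # pragma: no cover
    prior = parse_prior("auto", seqs.alphabet)
    data = LogoData.from_seqs(seqs, prior)
    return data.entropy, data.entropy_interval, data.weight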
def _from_URL_fileopen(target_url: str) -> TextIO: # pragma: no cover
"""opens files from a remote URL location"""
# parsing url in component parts
(scheme, net_location, path, param, query, frag) = urlparse(target_url)
# checks if string is URL link
if scheme != "http" and scheme != "https" and scheme != "ftp":
raise ValueError("Cannot open url: %s", target_url)
# checks for dropbox link
if net_location == "www.dropbox.com":
# changes dropbox http link into download link
if query == "dl=0":
query2 = "dl=1"
# rebuild download URL, with new query2 variable
target_url = urlunparse((scheme, net_location, path, param, query2, ""))
# checks for google drive link
if net_location == "drive.google.com":
# link configuration for direct download instead of html frame
google_directdl_frag = "https://docs.google.com/uc?export=download&id="
# pull file id
(scheme, net_location, path_raw, param, query, frag) = urlparse(target_url)
id_file = path_raw.split("/")[3]
# rebuild URL for direct download
target_url = google_directdl_frag + id_file
# save url to temporary file
req = Request(target_url)
res = urlopen(req)
return StringIO(str(res.read()))
| en | 0.602681 | # -------------------------------- WebLogo -------------------------------- # Copyright (c) 2003-2004 The Regents of the University of California. # Copyright (c) 2005 <NAME> # Copyright (c) 2006-2015, The Regents of the University of California, through # Lawrence Berkeley National Laboratory (subject to receipt of any required # approvals from the U.S. Dept. of Energy). All rights reserved. # This software is distributed under the new BSD Open Source License. # <http://www.opensource.org/licenses/bsd-license.html> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # (1) Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # (2) Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and or other materials provided with the distribution. # # (3) Neither the name of the University of California, Lawrence Berkeley # National Laboratory, U.S. Dept. of Energy nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Avoid 'from numpy import *' since numpy has lots of names defined # Shorten development version string of the form weblogo-3.6.1.dev43+g64d9f12.d20190304 # from .logo_formatter import (GhostscriptAPI, pdf_formatter, jpeg_formatter, png_formatter, # png_print_formatter, # txt_formatter, eps_formatter, formatters, default_formatter) # ------ META DATA ------ # __all__ = ['LogoOptions', # 'description', # '__version__', # 'LogoFormat', # 'LogoData', # 'GhostscriptAPI', # 'std_color_schemes', # 'default_color_schemes', # 'classic', # 'std_units', # 'std_sizes', # 'std_alphabets', # 'std_percentCG', # 'pdf_formatter', # 'jpeg_formatter', # 'png_formatter', # 'png_print_formatter', # 'txt_formatter', # 'eps_formatter', # 'formatters', # 'default_formatter', # 'base_distribution', # 'equiprobable_distribution', # 'read_seq_data', # 'Color', # 'ColorScheme', # 'parse_prior', # 'release_description', # 'description' # ] # pragma: no cover # ------ DATA ------ # Depends on sequence type # The base stack width is set equal to 9pt Courier. # (Courier has a width equal to 3/5 of the point size.) # Check that can get 80 characters in journal page @small # 40 characters in a journal column # Thermus thermophilus: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>: The genome sequence of the extreme # thermophile Thermus thermophilus. 
# Nat Biotechnol 2004, 22:547-53 A container for all logo formatting options. Not all of these are directly accessible through the CLI or web interfaces. To display LogoOption defaults:: >>> from weblogo import * >>> LogoOptions() All physical lengths are measured in points. (72 points per inch, 28.3 points per cm) Args: creator_text: Embedded as comment in figures. logo_title: Creates title for the sequence logo logo_label: An optional figure label, added to the top left (e.g. '(a)'). unit_name: See std_units for options. (Default 'bits') yaxis_label: Defaults to unit_name xaxis_label: Add a label to the x-axis, or hide x-axis altogether. fineprint: Defaults to WebLogo name and version show_yaxis: Display entropy scale along y-axis (default: True) show_xaxis: Display sequence numbers along x-axis (default: True) show_ends: Display label at the ends of the sequence (default: False) show_fineprint: Toggle display of the WebLogo version information in the lower right corner. Optional, but we appreciate the acknowledgment. show_errorbars: Draw errorbars (default: False) show_boxes: Draw boxes around stack characters (default: True) debug: Draw extra graphics debugging information. rotate_numbers: Draw xaxis numbers with vertical orientation? scale_width: boolean, scale width of characters proportional to ungaps pad_right: Make a single line logo the same width as multiline logos (default: False) stacks_per_line: Maximum number of logo stacks per logo line. (Default: 40) yaxis_tic_interval: Distance between ticmarks on y-axis(default: 1.0) yaxis_minor_tic_ratio: Distance between minor tic ratio yaxis_scale: Sets height of the y-axis in designated units xaxis_tic_interval: Distance between ticmarks on x-axis(default: 1.0) number_interval: Distance between ticmarks (default: 1.0) shrink_fraction: Proportional shrinkage of characters if show_boxes is true. errorbar_fraction: Sets error bars display proportion errorbar_width_fraction: Sets error bars display errorbar_gray: Sets error bars' gray scale percentage (default .75) resolution: Dots per inch (default: 96). Used for bitmapped output formats default_color: Symbol color if not otherwise specified color_scheme: A custom color scheme can be specified using CSS2 (Cascading Style Sheet) syntax. E.g. 'red', '#F00', '#FF0000', 'rgb(255, 0, 0)', 'rgb(100%, 0%, 0%)' or 'hsl(0, 100%, 50%)' for the color red. stack_width: Scale the visible stack width by the fraction of symbols in the column (I.e. columns with many gaps of unknowns are narrow.) (Default: yes) stack_aspect_ratio: Ratio of stack height to width (default: 5) logo_margin: Default: 2 pts stroke_width: Default: 0.5 pts tic_length: Default: 5 pts stack_margin: Default: 0.5 pts small_fontsize: Small text font size in points fontsize: Regular text font size in points title_fontsize: Title text font size in points number_fontsize: Font size for axis-numbers, in points. text_font: Select font for labels logo_font: Select font for Logo title_font: Select font for Logo's title first_index: Index of first position in sequence data logo_start: Lower bound of sequence to display logo_end: Upper bound of sequence to display Create a new LogoOptions instance. >>> logooptions = LogoOptions(logo_title = "Some Title String") >>> logooptions.show_yaxis = False >>> repr(logooptions) # yaxis_lable default depends on other settings. See LogoFormat # Dots per inch # self.show_color_key = False # NOT yet implemented # If true, draw stacks with largest letters on top. 
# End class LogoOptions Specifies the format of the logo. Requires LogoData and LogoOptions objects. >>> logodata = LogoData.from_seqs(seqs) >>> logooptions = LogoOptions() >>> logooptions.title = "A Logo Title" >>> format = LogoFormat(logodata, logooptions) Raises: ArgumentError: if arguments are invalid. Create a new LogoFormat instance. # Derived parameters. # Maximum character width. Stack width minus margins. # Attribute to test, test, error message # Run arguments tests. The second, attribute argument to the ArgumentError is # used by the UI to provide user feedback. # FIXME: More validation # Inclusive upper and lower bounds # FIXME: Validate here. Move from eps_formatter # probability units # End __init__ # End class LogoFormat Parse a description of the expected monomer distribution of a sequence. Valid compositions: * None or 'none' No composition sepecified * 'auto' or 'automatic' Use the typical average distribution for proteins and an equiprobable distribution for everything else. * 'equiprobable' All monomers have the same probability. * a percentage, e.g. '45%' or a fraction '0.45' The fraction of CG bases for nucleotide alphabets * a species name, e.g. '<NAME>i', 'H. sapiens', Use the average CG percentage for the species's genome. * An explicit distribution e.g. {'A':10, 'C':40, 'G':40, 'T':10} # pragma: no cover # pragma: no cover Return a dictionary mapping between the names of formats for the sequence data and the corresponing parsers. # Add position weight matrix formats to input parsers by hand Returns a list of the names of accepted sequence data formats. Read sequence data from the input stream and return a seqs object. The environment variable WEBLOGO_MAX_FILE_SIZE overides the max_file_size argument. Used to limit the load on the WebLogo webserver. # If max_file_size is set, or if fin==stdin (which is non-seekable), we # read the data and replace fin with a StringIO object. # Case is significant. Do not count lower case letters. # Add alphabet to seqs. The data needed to generate a sequence logo. Args: alphabet: The set of symbols to count. See also --sequence-type, --ignore-lower-case length: All sequences must be the same length, else WebLogo will return an error counts: An array of character counts entropy: The relative entropy of each column entropy_interval: entropy confidence interval Creates a new LogoData object Build a LogoData object from counts. # Counts is a Motif object? # counts = counts.array # FIXME: fixup .moremath.entropy()? Build a LogoData object from a SeqList, a list of sequences. # --- VALIDATE DATA --- # check that at least one sequence of length at least 1 long # Check sequence lengths # print(i, s, len(s)) # TODO: Redundant? Should be checked in SeqList? # FIXME: Check seqs.alphabet? # LogoData", file=out) # Show column names # Write the data table # pragma: no cover opens files from a remote URL location # parsing url in component parts # checks if string is URL link # checks for dropbox link # changes dropbox http link into download link # rebuild download URL, with new query2 variable # checks for google drive link # link configuration for direct download instead of html frame # pull file id # rebuild URL for direct download # save url to temporary file | 1.05893 | 1 |
appengine/swarming/server/task_scheduler.py | Slayo2008/New2 | 1 | 6630141 | # Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""High level tasks execution scheduling API.
This is the interface closest to the HTTP handlers.
"""
import datetime
import logging
import math
import random
import time
from google.appengine.ext import ndb
from components import auth
from components import datastore_utils
from components import pubsub
from components import utils
import event_mon_metrics
import ts_mon_metrics
from server import acl
from server import config
from server import task_pack
from server import task_queues
from server import task_request
from server import task_result
from server import task_to_run
### Private stuff.
_PROBABILITY_OF_QUICK_COMEBACK = 0.05
def _secs_to_ms(value):
"""Converts a seconds value in float to the number of ms as an integer."""
return int(round(value * 1000.))
def _expire_task(to_run_key, request):
"""Expires a TaskResultSummary and unschedules the TaskToRun.
Returns:
True on success.
"""
# Look if the TaskToRun is reapable once before doing the check inside the
# transaction. This reduces the likelihood of failing this check inside the
# transaction, which is an order of magnitude more costly.
if not to_run_key.get().is_reapable:
logging.info('Not reapable anymore')
return None
result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
now = utils.utcnow()
def run():
# 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
to_run_future = to_run_key.get_async()
result_summary_future = result_summary_key.get_async()
to_run = to_run_future.get_result()
if not to_run or not to_run.is_reapable:
result_summary_future.wait()
return False
to_run.queue_number = None
result_summary = result_summary_future.get_result()
if result_summary.try_number:
# It's a retry that is being expired. Keep the old state. That requires an
# additional pipelined GET but that shouldn't be the common case.
run_result = result_summary.run_result_key.get()
result_summary.set_from_run_result(run_result, request)
else:
result_summary.state = task_result.State.EXPIRED
result_summary.abandoned_ts = now
result_summary.modified_ts = now
futures = ndb.put_multi_async((to_run, result_summary))
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return True
# Add it to the negative cache *before* running the transaction. Either way
# the task was already reaped or the task is correctly expired and not
# reapable.
task_to_run.set_lookup_cache(to_run_key, False)
# It'll be caught by next cron job execution in case of failure.
try:
success = datastore_utils.transaction(run)
except datastore_utils.CommitError:
success = False
if success:
logging.info(
'Expired %s', task_pack.pack_result_summary_key(result_summary_key))
return success
def _reap_task(bot_dimensions, bot_version, to_run_key, request):
"""Reaps a task and insert the results entity.
Returns:
(TaskRunResult, SecretBytes) if successful, (None, None) otherwise.
"""
assert request.key == task_to_run.task_to_run_key_to_request_key(to_run_key)
result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
bot_id = bot_dimensions[u'id'][0]
now = utils.utcnow()
# Log before the task id in case the function fails in a bad state where the
# DB TX ran but the reply never comes to the bot. This is the worst case as
# this leads to a task that results in BOT_DIED without ever starting. This
# case is specifically handled in cron_handle_bot_died().
logging.info(
'_reap_task(%s)', task_pack.pack_result_summary_key(result_summary_key))
def run():
# 3 GET, 1 PUT at the end.
to_run_future = to_run_key.get_async()
result_summary_future = result_summary_key.get_async()
if request.properties.has_secret_bytes:
secret_bytes_future = request.secret_bytes_key.get_async()
to_run = to_run_future.get_result()
result_summary = result_summary_future.get_result()
orig_summary_state = result_summary.state
secret_bytes = None
if request.properties.has_secret_bytes:
secret_bytes = secret_bytes_future.get_result()
if not to_run:
logging.error('Missing TaskToRun?\n%s', result_summary.task_id)
return None, None
if not to_run.is_reapable:
logging.info('%s is not reapable', result_summary.task_id)
return None, None
if result_summary.bot_id == bot_id:
# This means two things, first it's a retry, second it's that the first
# try failed and the retry is being reaped by the same bot. Deny that, as
# the bot may be deeply broken and could be in a killing spree.
# TODO(maruel): Allow retry for bot locked task using 'id' dimension.
logging.warning(
'%s can\'t retry its own internal failure task',
result_summary.task_id)
return None, None
to_run.queue_number = None
run_result = task_result.new_run_result(
request, (result_summary.try_number or 0) + 1, bot_id, bot_version,
bot_dimensions)
# Upon bot reap, both .started_ts and .modified_ts matches. They differ on
# the first ping.
run_result.started_ts = now
run_result.modified_ts = now
result_summary.set_from_run_result(run_result, request)
ndb.put_multi([to_run, run_result, result_summary])
if result_summary.state != orig_summary_state:
_maybe_pubsub_notify_via_tq(result_summary, request)
return run_result, secret_bytes
# Add it to the negative cache *before* running the transaction. This will
  # inhibit concurrent readers from trying to reap this task. The downside is if
# this request fails in the middle of the transaction, the task may stay
# unreapable for up to 15 seconds.
task_to_run.set_lookup_cache(to_run_key, False)
try:
run_result, secret_bytes = datastore_utils.transaction(run, retries=0)
except datastore_utils.CommitError:
# The challenge here is that the transaction may have failed because:
    # - The DB had a hiccup and the TaskToRun, TaskRunResult and
# TaskResultSummary haven't been updated.
# - The entities had been updated by a concurrent transaction on another
# handler so it was not reapable anyway. This does cause exceptions as
    # both GETs return TaskToRun.queue_number != None but only one succeeds
# at the PUT.
#
# In the first case, we may want to reset the negative cache, while we don't
    # want to in the latter case. The trade-offs are:
# - negative cache is incorrectly set, so the task is not reapable for 15s
# - resetting the negative cache would cause even more contention
#
    # We chose the first one here for now, as when the DB starts misbehaving
# and the index becomes stale, it means the DB is *already* not in good
# shape, so it is preferable to not put more stress on it, and skipping a
# few tasks for 15s may even actively help the DB to stabilize.
logging.info('CommitError; reaping failed')
# The bot will reap the next available task in case of failure, no big deal.
run_result = None
secret_bytes = None
return run_result, secret_bytes
def _handle_dead_bot(run_result_key):
"""Handles TaskRunResult where its bot has stopped showing sign of life.
Transactionally updates the entities depending on the state of this task. The
task may be retried automatically, canceled or left alone.
Returns:
True if the task was retried, False if the task was killed, None if no
action was done.
"""
result_summary_key = task_pack.run_result_key_to_result_summary_key(
run_result_key)
request_key = task_pack.result_summary_key_to_request_key(result_summary_key)
request_future = request_key.get_async()
now = utils.utcnow()
server_version = utils.get_app_version()
packed = task_pack.pack_run_result_key(run_result_key)
request = request_future.get_result()
to_run_key = task_to_run.request_to_task_to_run_key(request)
def run():
"""Returns tuple(task_is_retried or None, bot_id)."""
# Do one GET, one PUT at the end.
run_result, result_summary, to_run = ndb.get_multi(
(run_result_key, result_summary_key, to_run_key))
if run_result.state != task_result.State.RUNNING:
# It was updated already or not updating last. Likely DB index was stale.
return None, run_result.bot_id
if run_result.modified_ts > now - task_result.BOT_PING_TOLERANCE:
# The query index IS stale.
return None, run_result.bot_id
run_result.signal_server_version(server_version)
old_modified = run_result.modified_ts
run_result.modified_ts = now
orig_summary_state = result_summary.state
if result_summary.try_number != run_result.try_number:
# Not updating correct run_result, cancel it without touching
# result_summary.
to_put = (run_result,)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
task_is_retried = None
elif (result_summary.try_number == 1 and now < request.expiration_ts and
(request.properties.idempotent or
run_result.started_ts == old_modified)):
# Retry it. It fits:
# - first try
# - not yet expired
# - One of:
# - idempotent
# - task hadn't got any ping at all from task_runner.run_command()
# TODO(maruel): Allow retry for bot locked task using 'id' dimension.
to_put = (run_result, result_summary, to_run)
to_run.queue_number = task_to_run.gen_queue_number(request)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
# Do not sync data from run_result to result_summary, since the task is
# being retried.
result_summary.reset_to_pending()
result_summary.modified_ts = now
task_is_retried = True
else:
# Kill it as BOT_DIED, there was more than one try, the task expired in
# the meantime or it wasn't idempotent.
to_put = (run_result, result_summary)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
result_summary.set_from_run_result(run_result, request)
task_is_retried = False
futures = ndb.put_multi_async(to_put)
    if orig_summary_state != result_summary.state:
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return task_is_retried
# Remove it from the negative cache *before* running the transaction. Either
# way the TaskToRun.queue_number was not set so there was no contention on
# this entity. At best the task is reenqueued for a retry.
task_to_run.set_lookup_cache(to_run_key, True)
try:
task_is_retried = datastore_utils.transaction(run)
except datastore_utils.CommitError:
task_is_retried = None
if task_is_retried:
logging.info('Retried %s', packed)
elif task_is_retried == False:
logging.debug('Ignored %s', packed)
return task_is_retried
def _copy_summary(src, dst, skip_list):
"""Copies the attributes of entity src into dst.
It doesn't copy the key nor any member in skip_list.
"""
assert type(src) == type(dst), '%s!=%s' % (src.__class__, dst.__class__)
# Access to a protected member _XX of a client class - pylint: disable=W0212
kwargs = {
k: getattr(src, k) for k in src._properties_fixed() if k not in skip_list
}
dst.populate(**kwargs)
def _maybe_pubsub_notify_now(result_summary, request):
"""Examines result_summary and sends task completion PubSub message.
  Does so only if result_summary indicates a task in a finished state and
  the request specifies a PubSub topic.
  Returns False to trigger the retry (on transient errors), or True if retry is
  not needed (e.g. the message was sent successfully or a fatal error happened).
"""
assert not ndb.in_transaction()
assert isinstance(
result_summary, task_result.TaskResultSummary), result_summary
assert isinstance(request, task_request.TaskRequest), request
if (result_summary.state in task_result.State.STATES_NOT_RUNNING and
request.pubsub_topic):
task_id = task_pack.pack_result_summary_key(result_summary.key)
try:
_pubsub_notify(
task_id, request.pubsub_topic,
request.pubsub_auth_token, request.pubsub_userdata)
except pubsub.TransientError:
logging.exception('Transient error when sending PubSub notification')
return False
except pubsub.Error:
logging.exception('Fatal error when sending PubSub notification')
return True # do not retry it
return True
def _maybe_pubsub_notify_via_tq(result_summary, request):
"""Examines result_summary and enqueues a task to send PubSub message.
Must be called within a transaction.
Raises CommitError on errors (to abort the transaction).
"""
assert ndb.in_transaction()
assert isinstance(
result_summary, task_result.TaskResultSummary), result_summary
assert isinstance(request, task_request.TaskRequest), request
if request.pubsub_topic:
task_id = task_pack.pack_result_summary_key(result_summary.key)
ok = utils.enqueue_task(
url='/internal/taskqueue/pubsub/%s' % task_id,
queue_name='pubsub',
transactional=True,
payload=utils.encode_to_json({
'task_id': task_id,
'topic': request.pubsub_topic,
'auth_token': request.pubsub_auth_token,
'userdata': request.pubsub_userdata,
}))
if not ok:
raise datastore_utils.CommitError('Failed to enqueue task')
def _pubsub_notify(task_id, topic, auth_token, userdata):
"""Sends PubSub notification about task completion.
Raises pubsub.TransientError on transient errors. Fatal errors are logged, but
not retried.
"""
logging.debug(
'Sending PubSub notify to "%s" (with userdata "%s") about '
'completion of "%s"', topic, userdata, task_id)
msg = {'task_id': task_id}
if userdata:
msg['userdata'] = userdata
try:
pubsub.publish(
topic=topic,
message=utils.encode_to_json(msg),
attributes={'auth_token': auth_token} if auth_token else None)
except pubsub.Error:
logging.exception('Fatal error when sending PubSub notification')
def _check_dimension_acls(request):
"""Raises AuthorizationError if some requested dimensions are forbidden.
Uses 'dimension_acls' field from the settings. See proto/config.proto.
"""
dim_acls = config.settings().dimension_acls
if not dim_acls or not dim_acls.entry:
return # not configured, this is fine
ident = request.authenticated
dims = request.properties.dimensions
assert 'id' in dims or 'pool' in dims, dims # see _validate_dimensions
assert ident is not None # see task_request.init_new_request
# Forbid targeting individual bots for non-admins, but allow using 'id' if
  # 'pool' is used as well (so whoever can post tasks to 'pool' can target an
# individual bot in that pool).
if 'id' in dims and 'pool' not in dims:
if not acl.is_admin():
raise auth.AuthorizationError(
'Only Swarming administrators can post tasks with "id" dimension '
'without specifying a "pool" dimension.')
for k, v in sorted(dims.iteritems()):
if not _can_use_dimension(dim_acls, ident, k, v):
raise auth.AuthorizationError(
'User %s is not allowed to schedule tasks with dimension "%s:%s"' %
(ident.to_bytes(), k, v))
def _can_use_dimension(dim_acls, ident, k, v):
"""Returns True if 'dimension_acls' allow the given dimension to be used.
Args:
dim_acls: config_pb2.DimensionACLs message.
ident: auth.Identity to check.
k: dimension name.
v: dimension value.
"""
for e in dim_acls.entry:
if '%s:%s' % (k, v) in e.dimension or '%s:*' % k in e.dimension:
return auth.is_group_member(e.usable_by, ident)
# A dimension not mentioned in 'dimension_acls' is allowed by default.
return True
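# Illustrative example (hypothetical config values, not taken from any real
# settings file): with a dimension_acls entry such as
#   entry { dimension: "pool:ci" usable_by: "ci-users" }
# a request declaring pool=ci is allowed only for members of the "ci-users"
# group, while a dimension never mentioned in dimension_acls stays allowed for
# everyone, per the default 'return True' above. A "pool:*" entry would match
# any value of the 'pool' dimension.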
def _find_dupe_task(now, h):
"""Finds a previously run task that is also idempotent and completed.
Fetch items that can be used to dedupe the task. See the comment for this
property for more details.
Do not use "task_result.TaskResultSummary.created_ts > oldest" here because
this would require a composite index. It's unnecessary because TaskRequest.key
is equivalent to decreasing TaskRequest.created_ts, ordering by key works as
well and doesn't require a composite index.
"""
# TODO(maruel): Make a reverse map on successful task completion so this
# becomes a simple ndb.get().
cls = task_result.TaskResultSummary
q = cls.query(cls.properties_hash==h).order(cls.key)
for i, dupe_summary in enumerate(q.iter(batch_size=1)):
# It is possible for the query to return stale items.
if (dupe_summary.state != task_result.State.COMPLETED or
dupe_summary.failure):
if i == 2:
# Indexes are very inconsistent, give up.
return None
continue
# Refuse tasks older than X days. This is due to the isolate server
# dropping files.
# TODO(maruel): The value should be calculated from the isolate server
# setting and be unbounded when no isolated input was used.
oldest = now - datetime.timedelta(
seconds=config.settings().reusable_task_age_secs)
if dupe_summary.created_ts <= oldest:
return None
return dupe_summary
return None
### Public API.
def exponential_backoff(attempt_num):
"""Returns an exponential backoff value in seconds."""
assert attempt_num >= 0
if random.random() < _PROBABILITY_OF_QUICK_COMEBACK:
# Randomly ask the bot to return quickly.
return 1.0
# If the user provided a max then use it, otherwise use default 60s.
max_wait = config.settings().max_bot_sleep_time or 60.
return min(max_wait, math.pow(1.5, min(attempt_num, 10) + 1))
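# Worked example (illustrative; assumes the default 60s cap and ignores the 5%
# "quick comeback" branch that returns 1.0): attempt 0 -> 1.5s, 1 -> 2.25s,
# 2 -> ~3.4s, 5 -> ~11.4s, and attempts 10 and above are capped at
# min(60, 1.5**11) = 60s.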
def schedule_request(request, secret_bytes, check_acls=True):
"""Creates and stores all the entities to schedule a new task request.
Checks ACLs first. Raises auth.AuthorizationError if caller is not authorized
to post this request.
The number of entities created is 3: TaskRequest, TaskToRun and
TaskResultSummary.
  All 4 entities in the same entity group (TaskRequest, TaskToRun,
TaskResultSummary, SecretBytes) are saved as a DB transaction.
Arguments:
  - request: TaskRequest entity to be saved in the DB. Its key must not be set
and the entity must not be saved in the DB yet.
  - secret_bytes: SecretBytes entity to be saved in the DB. Its key will be set
and the entity will be stored by this function. None is allowed if
there are no SecretBytes for this task.
- check_acls: Whether the request should check ACLs.
Returns:
TaskResultSummary. TaskToRun is not returned.
"""
assert isinstance(request, task_request.TaskRequest), request
assert not request.key, request.key
# Raises AuthorizationError with helpful message if the request.authorized
# can't use some of the requested dimensions.
if check_acls:
_check_dimension_acls(request)
# This does a DB GET, occasionally triggers a task queue. May throw, which is
# surfaced to the user but it is safe as the task request wasn't stored yet.
task_queues.assert_task(request)
now = utils.utcnow()
request.key = task_request.new_request_key()
task = task_to_run.new_task_to_run(request)
result_summary = task_result.new_result_summary(request)
result_summary.modified_ts = now
if secret_bytes:
secret_bytes.key = request.secret_bytes_key
def get_new_keys():
# Warning: this assumes knowledge about the hierarchy of each entity.
key = task_request.new_request_key()
task.key = ndb.Key(task.key.kind(), task.key.id(), parent=key)
if secret_bytes:
secret_bytes.key = ndb.Key(
secret_bytes.key.kind(), secret_bytes.key.id(), parent=key)
old = result_summary.task_id
result_summary.key = ndb.Key(
result_summary.key.kind(), result_summary.key.id(), parent=key)
logging.info('%s conflicted, using %s', old, result_summary.task_id)
return key
deduped = False
if request.properties.idempotent:
dupe_summary = _find_dupe_task(now, request.properties_hash)
if dupe_summary:
# Setting task.queue_number to None removes it from the scheduling.
task.queue_number = None
_copy_summary(
dupe_summary, result_summary,
('created_ts', 'modified_ts', 'name', 'user', 'tags'))
# Zap irrelevant properties. PerformanceStats is also not copied over,
# since it's not relevant.
result_summary.properties_hash = None
result_summary.try_number = 0
result_summary.cost_saved_usd = result_summary.cost_usd
# Only zap after.
result_summary.costs_usd = []
result_summary.deduped_from = task_pack.pack_run_result_key(
dupe_summary.run_result_key)
# In this code path, there's not much to do as the task will not be run,
# previous results are returned. We still need to store all the entities
# correctly. However, since the has_secret_bytes property is already set
# for UI purposes, and the task itself will never be run, we skip storing
# the SecretBytes, as they would never be read and will just consume space
# in the datastore (and the task we deduplicated with will have them
# stored anyway, if we really want to get them again).
datastore_utils.insert(
request, get_new_keys, extra=[task, result_summary])
logging.debug(
'New request %s reusing %s', result_summary.task_id,
dupe_summary.task_id)
deduped = True
if not deduped:
# Storing these entities makes this task live. It is important at this point
# that the HTTP handler returns as fast as possible, otherwise the task will
# be run but the client will not know about it.
datastore_utils.insert(request, get_new_keys,
extra=filter(bool, [task, result_summary, secret_bytes]))
logging.debug('New request %s', result_summary.task_id)
# Get parent task details if applicable.
if request.parent_task_id:
parent_run_key = task_pack.unpack_run_result_key(request.parent_task_id)
parent_task_keys = [
parent_run_key,
task_pack.run_result_key_to_result_summary_key(parent_run_key),
]
def run_parent():
# This one is slower.
items = ndb.get_multi(parent_task_keys)
k = result_summary.task_id
for item in items:
item.children_task_ids.append(k)
item.modified_ts = now
ndb.put_multi(items)
# Raising will abort to the caller. There's a risk that for tasks with
# parent tasks, the task will be lost due to this transaction.
# TODO(maruel): An option is to update the parent task as part of a cron
# job, which would remove this code from the critical path.
datastore_utils.transaction(run_parent)
ts_mon_metrics.update_jobs_requested_metrics(result_summary, deduped)
return result_summary
def bot_reap_task(bot_dimensions, bot_version, deadline):
"""Reaps a TaskToRun if one is available.
The process is to find a TaskToRun where its .queue_number is set, then
create a TaskRunResult for it.
Returns:
tuple of (TaskRequest, SecretBytes, TaskRunResult) for the task that was
reaped. The TaskToRun involved is not returned.
"""
start = time.time()
bot_id = bot_dimensions[u'id'][0]
iterated = 0
failures = 0
try:
q = task_to_run.yield_next_available_task_to_dispatch(
bot_dimensions, deadline)
for request, to_run in q:
iterated += 1
run_result, secret_bytes = _reap_task(
bot_dimensions, bot_version, to_run.key, request)
if not run_result:
failures += 1
        # Sad thing is that there is no way here to know the try number.
logging.info(
'failed to reap: %s0',
task_pack.pack_request_key(to_run.request_key))
continue
logging.info('Reaped: %s', run_result.task_id)
return request, secret_bytes, run_result
return None, None, None
finally:
logging.debug(
'bot_reap_task(%s) in %.3fs: %d iterated, %d failure',
bot_id, time.time()-start, iterated, failures)
def bot_update_task(
run_result_key, bot_id, output, output_chunk_start, exit_code, duration,
hard_timeout, io_timeout, cost_usd, outputs_ref, cipd_pins,
performance_stats):
"""Updates a TaskRunResult and TaskResultSummary, along TaskOutputChunk.
Arguments:
- run_result_key: ndb.Key to TaskRunResult.
- bot_id: Self advertised bot id to ensure it's the one expected.
- output: Data to append to this command output.
- output_chunk_start: Index of output in the stdout stream.
- exit_code: Mark that this task completed.
- duration: Time spent in seconds for this task, excluding overheads.
  - hard_timeout: Bool set if a hard timeout occurred.
  - io_timeout: Bool set if an I/O timeout occurred.
- cost_usd: Cost in $USD of this task up to now.
- outputs_ref: task_request.FilesRef instance or None.
- cipd_pins: None or task_result.CipdPins
- performance_stats: task_result.PerformanceStats instance or None. Can only
be set when the task is completing.
Invalid states, these are flat out refused:
  - A command is updated after it already had an exit code assigned.
Returns:
TaskRunResult.state or None in case of failure.
"""
assert output_chunk_start is None or isinstance(output_chunk_start, int)
assert output is None or isinstance(output, str)
if cost_usd is not None and cost_usd < 0.:
    raise ValueError('cost_usd must be None or greater than or equal to 0')
if duration is not None and duration < 0.:
    raise ValueError('duration must be None or greater than or equal to 0')
if (duration is None) != (exit_code is None):
raise ValueError(
'had unexpected duration; expected iff a command completes\n'
'duration: %r; exit: %r' % (duration, exit_code))
if performance_stats and duration is None:
raise ValueError(
'duration must be set when performance_stats is set\n'
'duration: %s; performance_stats: %s' %
(duration, performance_stats))
packed = task_pack.pack_run_result_key(run_result_key)
logging.debug(
'bot_update_task(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
packed, bot_id, len(output) if output else output, output_chunk_start,
exit_code, duration, hard_timeout, io_timeout, cost_usd, outputs_ref,
cipd_pins, performance_stats)
result_summary_key = task_pack.run_result_key_to_result_summary_key(
run_result_key)
request_key = task_pack.result_summary_key_to_request_key(result_summary_key)
request_future = request_key.get_async()
server_version = utils.get_app_version()
request = request_future.get_result()
now = utils.utcnow()
def run():
"""Returns tuple(TaskRunResult, bool(completed), str(error)).
Any error is returned as a string to be passed to logging.error() instead of
logging inside the transaction for performance.
"""
# 2 consecutive GETs, one PUT.
run_result_future = run_result_key.get_async()
result_summary_future = result_summary_key.get_async()
run_result = run_result_future.get_result()
if not run_result:
result_summary_future.wait()
return None, None, 'is missing'
if run_result.bot_id != bot_id:
result_summary_future.wait()
return None, None, (
'expected bot (%s) but had update from bot %s' % (
run_result.bot_id, bot_id))
if not run_result.started_ts:
return None, None, 'TaskRunResult is broken; %s' % (
run_result.to_dict())
# Assumptions:
# - duration and exit_code are both set or not set.
# - same for run_result.
if exit_code is not None:
if run_result.exit_code is not None:
# This happens as an HTTP request is retried when the DB write succeeded
# but it still returned HTTP 500.
if run_result.exit_code != exit_code:
result_summary_future.wait()
return None, None, 'got 2 different exit_code; %s then %s' % (
run_result.exit_code, exit_code)
if run_result.duration != duration:
result_summary_future.wait()
return None, None, 'got 2 different durations; %s then %s' % (
run_result.duration, duration)
else:
run_result.duration = duration
run_result.exit_code = exit_code
if outputs_ref:
run_result.outputs_ref = outputs_ref
if cipd_pins:
run_result.cipd_pins = cipd_pins
if run_result.state in task_result.State.STATES_RUNNING:
if hard_timeout or io_timeout:
run_result.state = task_result.State.TIMED_OUT
run_result.completed_ts = now
elif run_result.exit_code is not None:
run_result.state = task_result.State.COMPLETED
run_result.completed_ts = now
run_result.signal_server_version(server_version)
run_result.validate(request)
to_put = [run_result]
if output:
      # This does 1 multi GET. It also modifies run_result in place.
to_put.extend(run_result.append_output(output, output_chunk_start or 0))
if performance_stats:
performance_stats.key = task_pack.run_result_key_to_performance_stats_key(
run_result.key)
to_put.append(performance_stats)
run_result.cost_usd = max(cost_usd, run_result.cost_usd or 0.)
run_result.modified_ts = now
result_summary = result_summary_future.get_result()
if (result_summary.try_number and
result_summary.try_number > run_result.try_number):
      # If a shard is retried but the bot running the previous try somehow
      # reappears and reports success, the result must still show the last
      # try's result. We still need to update cost_usd manually.
result_summary.costs_usd[run_result.try_number-1] = run_result.cost_usd
result_summary.modified_ts = now
else:
result_summary.set_from_run_result(run_result, request)
result_summary.validate(request)
to_put.append(result_summary)
ndb.put_multi(to_put)
return result_summary, run_result, None
try:
smry, run_result, error = datastore_utils.transaction(run)
except datastore_utils.CommitError as e:
logging.info('Got commit error: %s', e)
# It is important that the caller correctly surface this error.
return None
assert bool(error) != bool(run_result), (error, run_result)
if error:
logging.error('Task %s %s', packed, error)
return None
# Caller must retry if PubSub enqueue fails.
task_completed = run_result.state != task_result.State.RUNNING
if not _maybe_pubsub_notify_now(smry, request):
return None
if task_completed:
event_mon_metrics.send_task_event(smry)
ts_mon_metrics.update_jobs_completed_metrics(smry)
return run_result.state
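# Summary of the state transitions applied above (descriptive note, not part of
# the original module): while the run is in a RUNNING state, a hard or I/O
# timeout moves it to TIMED_OUT, a reported exit_code moves it to COMPLETED,
# and otherwise it stays RUNNING; completion additionally triggers the PubSub
# notification and the event/ts_mon metrics updates.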
def bot_kill_task(run_result_key, bot_id):
"""Terminates a task that is currently running as an internal failure.
Returns:
    An error message as a str, or None on success.
"""
result_summary_key = task_pack.run_result_key_to_result_summary_key(
run_result_key)
request = task_pack.result_summary_key_to_request_key(
result_summary_key).get()
server_version = utils.get_app_version()
now = utils.utcnow()
packed = task_pack.pack_run_result_key(run_result_key)
def run():
run_result, result_summary = ndb.get_multi(
(run_result_key, result_summary_key))
if bot_id and run_result.bot_id != bot_id:
return 'Bot %s sent task kill for task %s owned by bot %s' % (
bot_id, packed, run_result.bot_id)
if run_result.state == task_result.State.BOT_DIED:
# Ignore this failure.
return None
run_result.signal_server_version(server_version)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
run_result.modified_ts = now
result_summary.set_from_run_result(run_result, None)
futures = ndb.put_multi_async((run_result, result_summary))
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return None
try:
msg = datastore_utils.transaction(run)
except datastore_utils.CommitError as e:
# At worst, the task will be tagged as BOT_DIED after BOT_PING_TOLERANCE
# seconds passed on the next cron_handle_bot_died cron job.
return 'Failed killing task %s: %s' % (packed, e)
return msg
def cancel_task(request, result_key):
"""Cancels a task if possible.
Ensures that the associated TaskToRun is canceled and updates the
TaskResultSummary/TaskRunResult accordingly.
Warning: ACL check must have been done before.
"""
to_run_key = task_to_run.request_to_task_to_run_key(request)
if result_key.kind() == 'TaskRunResult':
result_key = task_pack.run_result_key_to_result_summary_key(result_key)
now = utils.utcnow()
def run():
to_run, result_summary = ndb.get_multi((to_run_key, result_key))
was_running = result_summary.state == task_result.State.RUNNING
if not result_summary.can_be_canceled:
return False, was_running
to_run.queue_number = None
result_summary.state = task_result.State.CANCELED
result_summary.abandoned_ts = now
result_summary.modified_ts = now
futures = ndb.put_multi_async((to_run, result_summary))
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return True, was_running
# Add it to the negative cache *before* running the transaction. Either way
# the task was already reaped or the task is correctly canceled thus not
# reapable.
task_to_run.set_lookup_cache(to_run_key, False)
try:
ok, was_running = datastore_utils.transaction(run)
except datastore_utils.CommitError as e:
packed = task_pack.pack_result_summary_key(result_key)
return 'Failed killing task %s: %s' % (packed, e)
# TODO(maruel): Add paper trail.
return ok, was_running
### Cron job.
def cron_abort_expired_task_to_run(host):
"""Aborts expired TaskToRun requests to execute a TaskRequest on a bot.
Three reasons can cause this situation:
  - Higher throughput of incoming task requests than the rate at which task
  requests are being completed, e.g. there are not enough bots to run all the
  tasks that come in at the current rate. That's normal overflow and must be
  handled accordingly.
  - No bot connected that satisfies the requested dimensions. This is trickier:
  it is either a typo in the dimensions, or the bots all died and the admins
  must reconnect them.
- Server has internal failures causing it to fail to either distribute the
tasks or properly receive results from the bots.
Returns:
Packed tasks ids of aborted tasks.
"""
killed = []
skipped = 0
try:
for to_run in task_to_run.yield_expired_task_to_run():
request = to_run.request_key.get()
if _expire_task(to_run.key, request):
# TODO(maruel): Know which try it is.
killed.append(request)
ts_mon_metrics.tasks_expired.increment(
fields=ts_mon_metrics.extract_job_fields(request.tags))
else:
# It's not a big deal, the bot will continue running.
skipped += 1
finally:
if killed:
logging.warning(
'EXPIRED!\n%d tasks:\n%s',
len(killed),
'\n'.join(
' %s/user/task/%s %s' % (host, i.task_id, i.properties.dimensions)
for i in killed))
logging.info('Killed %d task, skipped %d', len(killed), skipped)
return [i.task_id for i in killed]
def cron_handle_bot_died(host):
"""Aborts or retry stale TaskRunResult where the bot stopped sending updates.
If the task was at its first try, it'll be retried. Otherwise the task will be
canceled.
Returns:
- task IDs killed
- number of task retried
- number of task ignored
"""
ignored = 0
killed = []
retried = 0
try:
for run_result_key in task_result.yield_run_result_keys_with_dead_bot():
result = _handle_dead_bot(run_result_key)
if result is True:
retried += 1
elif result is False:
killed.append(task_pack.pack_run_result_key(run_result_key))
else:
ignored += 1
finally:
if killed:
logging.error(
'BOT_DIED!\n%d tasks:\n%s',
len(killed),
'\n'.join(' %s/user/task/%s' % (host, i) for i in killed))
logging.info(
'Killed %d; retried %d; ignored: %d', len(killed), retried, ignored)
return killed, retried, ignored
## Task queue tasks.
def task_handle_pubsub_task(payload):
"""Handles task enqueued by _maybe_pubsub_notify_via_tq."""
# Do not catch errors to trigger task queue task retry. Errors should not
# happen in normal case.
_pubsub_notify(
payload['task_id'], payload['topic'],
payload['auth_token'], payload['userdata'])
| # Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""High level tasks execution scheduling API.
This is the interface closest to the HTTP handlers.
"""
import datetime
import logging
import math
import random
import time
from google.appengine.ext import ndb
from components import auth
from components import datastore_utils
from components import pubsub
from components import utils
import event_mon_metrics
import ts_mon_metrics
from server import acl
from server import config
from server import task_pack
from server import task_queues
from server import task_request
from server import task_result
from server import task_to_run
### Private stuff.
_PROBABILITY_OF_QUICK_COMEBACK = 0.05
def _secs_to_ms(value):
"""Converts a seconds value in float to the number of ms as an integer."""
return int(round(value * 1000.))
def _expire_task(to_run_key, request):
"""Expires a TaskResultSummary and unschedules the TaskToRun.
Returns:
True on success.
"""
# Look if the TaskToRun is reapable once before doing the check inside the
# transaction. This reduces the likelihood of failing this check inside the
# transaction, which is an order of magnitude more costly.
if not to_run_key.get().is_reapable:
logging.info('Not reapable anymore')
return None
result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
now = utils.utcnow()
def run():
# 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
to_run_future = to_run_key.get_async()
result_summary_future = result_summary_key.get_async()
to_run = to_run_future.get_result()
if not to_run or not to_run.is_reapable:
result_summary_future.wait()
return False
to_run.queue_number = None
result_summary = result_summary_future.get_result()
if result_summary.try_number:
# It's a retry that is being expired. Keep the old state. That requires an
# additional pipelined GET but that shouldn't be the common case.
run_result = result_summary.run_result_key.get()
result_summary.set_from_run_result(run_result, request)
else:
result_summary.state = task_result.State.EXPIRED
result_summary.abandoned_ts = now
result_summary.modified_ts = now
futures = ndb.put_multi_async((to_run, result_summary))
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return True
# Add it to the negative cache *before* running the transaction. Either way
# the task was already reaped or the task is correctly expired and not
# reapable.
task_to_run.set_lookup_cache(to_run_key, False)
# It'll be caught by next cron job execution in case of failure.
try:
success = datastore_utils.transaction(run)
except datastore_utils.CommitError:
success = False
if success:
logging.info(
'Expired %s', task_pack.pack_result_summary_key(result_summary_key))
return success
def _reap_task(bot_dimensions, bot_version, to_run_key, request):
"""Reaps a task and insert the results entity.
Returns:
(TaskRunResult, SecretBytes) if successful, (None, None) otherwise.
"""
assert request.key == task_to_run.task_to_run_key_to_request_key(to_run_key)
result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
bot_id = bot_dimensions[u'id'][0]
now = utils.utcnow()
# Log before the task id in case the function fails in a bad state where the
# DB TX ran but the reply never comes to the bot. This is the worst case as
# this leads to a task that results in BOT_DIED without ever starting. This
# case is specifically handled in cron_handle_bot_died().
logging.info(
'_reap_task(%s)', task_pack.pack_result_summary_key(result_summary_key))
def run():
# 3 GET, 1 PUT at the end.
to_run_future = to_run_key.get_async()
result_summary_future = result_summary_key.get_async()
if request.properties.has_secret_bytes:
secret_bytes_future = request.secret_bytes_key.get_async()
to_run = to_run_future.get_result()
result_summary = result_summary_future.get_result()
orig_summary_state = result_summary.state
secret_bytes = None
if request.properties.has_secret_bytes:
secret_bytes = secret_bytes_future.get_result()
if not to_run:
logging.error('Missing TaskToRun?\n%s', result_summary.task_id)
return None, None
if not to_run.is_reapable:
logging.info('%s is not reapable', result_summary.task_id)
return None, None
if result_summary.bot_id == bot_id:
# This means two things, first it's a retry, second it's that the first
# try failed and the retry is being reaped by the same bot. Deny that, as
# the bot may be deeply broken and could be in a killing spree.
# TODO(maruel): Allow retry for bot locked task using 'id' dimension.
logging.warning(
'%s can\'t retry its own internal failure task',
result_summary.task_id)
return None, None
to_run.queue_number = None
run_result = task_result.new_run_result(
request, (result_summary.try_number or 0) + 1, bot_id, bot_version,
bot_dimensions)
# Upon bot reap, both .started_ts and .modified_ts matches. They differ on
# the first ping.
run_result.started_ts = now
run_result.modified_ts = now
result_summary.set_from_run_result(run_result, request)
ndb.put_multi([to_run, run_result, result_summary])
if result_summary.state != orig_summary_state:
_maybe_pubsub_notify_via_tq(result_summary, request)
return run_result, secret_bytes
  # Add it to the negative cache *before* running the transaction. This
  # prevents concurrent readers from trying to reap this task. The downside is
  # that if this request fails in the middle of the transaction, the task may
  # stay unreapable for up to 15 seconds.
task_to_run.set_lookup_cache(to_run_key, False)
try:
run_result, secret_bytes = datastore_utils.transaction(run, retries=0)
except datastore_utils.CommitError:
    # The challenge here is that the transaction may have failed because:
    # - The DB had a hiccup and the TaskToRun, TaskRunResult and
    #   TaskResultSummary weren't updated.
    # - The entities had been updated by a concurrent transaction on another
    #   handler, so the task was not reapable anyway. This does cause
    #   exceptions, as both GETs return TaskToRun.queue_number != None but only
    #   one succeeds at the PUT.
    #
    # In the first case we may want to reset the negative cache, while in the
    # latter case we don't. The trade-offs are:
    # - the negative cache is incorrectly set, so the task is not reapable for
    #   15s
    # - resetting the negative cache would cause even more contention
    #
    # We chose the first one here for now: when the DB starts misbehaving and
    # the index becomes stale, the DB is *already* not in good shape, so it is
    # preferable not to put more stress on it, and skipping a few tasks for 15s
    # may even actively help the DB stabilize.
logging.info('CommitError; reaping failed')
# The bot will reap the next available task in case of failure, no big deal.
run_result = None
secret_bytes = None
return run_result, secret_bytes
def _handle_dead_bot(run_result_key):
"""Handles TaskRunResult where its bot has stopped showing sign of life.
Transactionally updates the entities depending on the state of this task. The
task may be retried automatically, canceled or left alone.
Returns:
True if the task was retried, False if the task was killed, None if no
action was done.
"""
result_summary_key = task_pack.run_result_key_to_result_summary_key(
run_result_key)
request_key = task_pack.result_summary_key_to_request_key(result_summary_key)
request_future = request_key.get_async()
now = utils.utcnow()
server_version = utils.get_app_version()
packed = task_pack.pack_run_result_key(run_result_key)
request = request_future.get_result()
to_run_key = task_to_run.request_to_task_to_run_key(request)
def run():
"""Returns tuple(task_is_retried or None, bot_id)."""
# Do one GET, one PUT at the end.
run_result, result_summary, to_run = ndb.get_multi(
(run_result_key, result_summary_key, to_run_key))
if run_result.state != task_result.State.RUNNING:
      # It was already updated, or this is not the latest run. The DB index was
      # likely stale.
return None, run_result.bot_id
if run_result.modified_ts > now - task_result.BOT_PING_TOLERANCE:
# The query index IS stale.
return None, run_result.bot_id
run_result.signal_server_version(server_version)
old_modified = run_result.modified_ts
run_result.modified_ts = now
orig_summary_state = result_summary.state
if result_summary.try_number != run_result.try_number:
# Not updating correct run_result, cancel it without touching
# result_summary.
to_put = (run_result,)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
task_is_retried = None
elif (result_summary.try_number == 1 and now < request.expiration_ts and
(request.properties.idempotent or
run_result.started_ts == old_modified)):
# Retry it. It fits:
# - first try
# - not yet expired
# - One of:
# - idempotent
# - task hadn't got any ping at all from task_runner.run_command()
# TODO(maruel): Allow retry for bot locked task using 'id' dimension.
to_put = (run_result, result_summary, to_run)
to_run.queue_number = task_to_run.gen_queue_number(request)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
# Do not sync data from run_result to result_summary, since the task is
# being retried.
result_summary.reset_to_pending()
result_summary.modified_ts = now
task_is_retried = True
else:
# Kill it as BOT_DIED, there was more than one try, the task expired in
# the meantime or it wasn't idempotent.
to_put = (run_result, result_summary)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
result_summary.set_from_run_result(run_result, request)
task_is_retried = False
futures = ndb.put_multi_async(to_put)
    if orig_summary_state != result_summary.state:
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return task_is_retried
  # Remove it from the negative cache *before* running the transaction. Either
  # way, TaskToRun.queue_number was not set, so there was no contention on this
  # entity. At best the task is re-enqueued for a retry.
task_to_run.set_lookup_cache(to_run_key, True)
try:
task_is_retried = datastore_utils.transaction(run)
except datastore_utils.CommitError:
task_is_retried = None
if task_is_retried:
logging.info('Retried %s', packed)
elif task_is_retried == False:
logging.debug('Ignored %s', packed)
return task_is_retried
def _copy_summary(src, dst, skip_list):
"""Copies the attributes of entity src into dst.
  It copies neither the key nor any member in skip_list.
"""
assert type(src) == type(dst), '%s!=%s' % (src.__class__, dst.__class__)
# Access to a protected member _XX of a client class - pylint: disable=W0212
kwargs = {
k: getattr(src, k) for k in src._properties_fixed() if k not in skip_list
}
dst.populate(**kwargs)
def _maybe_pubsub_notify_now(result_summary, request):
"""Examines result_summary and sends task completion PubSub message.
  Does so only if result_summary indicates a task in some finished state and
  the request specifies a pubsub topic.
  Returns False to trigger a retry (on transient errors), or True if a retry is
  not needed (e.g. the message was sent successfully or a fatal error happened).
"""
assert not ndb.in_transaction()
assert isinstance(
result_summary, task_result.TaskResultSummary), result_summary
assert isinstance(request, task_request.TaskRequest), request
if (result_summary.state in task_result.State.STATES_NOT_RUNNING and
request.pubsub_topic):
task_id = task_pack.pack_result_summary_key(result_summary.key)
try:
_pubsub_notify(
task_id, request.pubsub_topic,
request.pubsub_auth_token, request.pubsub_userdata)
except pubsub.TransientError:
logging.exception('Transient error when sending PubSub notification')
return False
except pubsub.Error:
logging.exception('Fatal error when sending PubSub notification')
return True # do not retry it
return True
def _maybe_pubsub_notify_via_tq(result_summary, request):
"""Examines result_summary and enqueues a task to send PubSub message.
Must be called within a transaction.
Raises CommitError on errors (to abort the transaction).
"""
assert ndb.in_transaction()
assert isinstance(
result_summary, task_result.TaskResultSummary), result_summary
assert isinstance(request, task_request.TaskRequest), request
if request.pubsub_topic:
task_id = task_pack.pack_result_summary_key(result_summary.key)
ok = utils.enqueue_task(
url='/internal/taskqueue/pubsub/%s' % task_id,
queue_name='pubsub',
transactional=True,
payload=utils.encode_to_json({
'task_id': task_id,
'topic': request.pubsub_topic,
'auth_token': request.pubsub_auth_token,
'userdata': request.pubsub_userdata,
}))
if not ok:
raise datastore_utils.CommitError('Failed to enqueue task')
def _pubsub_notify(task_id, topic, auth_token, userdata):
"""Sends PubSub notification about task completion.
Raises pubsub.TransientError on transient errors. Fatal errors are logged, but
not retried.
"""
logging.debug(
'Sending PubSub notify to "%s" (with userdata "%s") about '
'completion of "%s"', topic, userdata, task_id)
msg = {'task_id': task_id}
if userdata:
msg['userdata'] = userdata
try:
pubsub.publish(
topic=topic,
message=utils.encode_to_json(msg),
attributes={'auth_token': auth_token} if auth_token else None)
except pubsub.Error:
logging.exception('Fatal error when sending PubSub notification')
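# Illustrative sketch (not part of the original module): given the publish()
# call above, a PubSub subscriber on the configured topic would receive a
# JSON-encoded message roughly like the following; the task id and userdata
# values here are made up.
#
#   data (decoded):  {"task_id": "5ed1fc178f73b510", "userdata": "ci-build-42"}
#   attributes:      {"auth_token": "<pubsub_auth_token from the TaskRequest>"}
#
# i.e. the payload only carries 'task_id' and the optional 'userdata', while
# the auth token, if any, travels as a message attribute.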
def _check_dimension_acls(request):
"""Raises AuthorizationError if some requested dimensions are forbidden.
Uses 'dimension_acls' field from the settings. See proto/config.proto.
"""
dim_acls = config.settings().dimension_acls
if not dim_acls or not dim_acls.entry:
return # not configured, this is fine
ident = request.authenticated
dims = request.properties.dimensions
assert 'id' in dims or 'pool' in dims, dims # see _validate_dimensions
assert ident is not None # see task_request.init_new_request
# Forbid targeting individual bots for non-admins, but allow using 'id' if
  # 'pool' is used as well (so whoever can post tasks to 'pool' can target an
  # individual bot in that pool).
if 'id' in dims and 'pool' not in dims:
if not acl.is_admin():
raise auth.AuthorizationError(
'Only Swarming administrators can post tasks with "id" dimension '
'without specifying a "pool" dimension.')
for k, v in sorted(dims.iteritems()):
if not _can_use_dimension(dim_acls, ident, k, v):
raise auth.AuthorizationError(
'User %s is not allowed to schedule tasks with dimension "%s:%s"' %
(ident.to_bytes(), k, v))
def _can_use_dimension(dim_acls, ident, k, v):
"""Returns True if 'dimension_acls' allow the given dimension to be used.
Args:
dim_acls: config_pb2.DimensionACLs message.
ident: auth.Identity to check.
k: dimension name.
v: dimension value.
"""
for e in dim_acls.entry:
if '%s:%s' % (k, v) in e.dimension or '%s:*' % k in e.dimension:
return auth.is_group_member(e.usable_by, ident)
# A dimension not mentioned in 'dimension_acls' is allowed by default.
return True
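# Illustrative sketch (assumption, not from the original config): a
# dimension_acls block in the service settings might look roughly like the
# text proto below. With it, _can_use_dimension() would require membership in
# the 'gpu-users' group to schedule tasks using 'pool:gpu' or any 'gpu:<value>'
# dimension, while unlisted dimensions fall through to the default-allow
# return above.
#
#   dimension_acls {
#     entry {
#       dimension: "pool:gpu"
#       dimension: "gpu:*"
#       usable_by: "gpu-users"
#     }
#   }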
def _find_dupe_task(now, h):
"""Finds a previously run task that is also idempotent and completed.
Fetch items that can be used to dedupe the task. See the comment for this
property for more details.
Do not use "task_result.TaskResultSummary.created_ts > oldest" here because
  this would require a composite index. It's unnecessary because TaskRequest.key
  is equivalent to decreasing TaskRequest.created_ts, so ordering by key works
  as well and doesn't require a composite index.
"""
# TODO(maruel): Make a reverse map on successful task completion so this
# becomes a simple ndb.get().
cls = task_result.TaskResultSummary
q = cls.query(cls.properties_hash==h).order(cls.key)
for i, dupe_summary in enumerate(q.iter(batch_size=1)):
# It is possible for the query to return stale items.
if (dupe_summary.state != task_result.State.COMPLETED or
dupe_summary.failure):
if i == 2:
# Indexes are very inconsistent, give up.
return None
continue
# Refuse tasks older than X days. This is due to the isolate server
# dropping files.
# TODO(maruel): The value should be calculated from the isolate server
# setting and be unbounded when no isolated input was used.
oldest = now - datetime.timedelta(
seconds=config.settings().reusable_task_age_secs)
if dupe_summary.created_ts <= oldest:
return None
return dupe_summary
return None
### Public API.
def exponential_backoff(attempt_num):
"""Returns an exponential backoff value in seconds."""
assert attempt_num >= 0
if random.random() < _PROBABILITY_OF_QUICK_COMEBACK:
# Randomly ask the bot to return quickly.
return 1.0
# If the user provided a max then use it, otherwise use default 60s.
max_wait = config.settings().max_bot_sleep_time or 60.
return min(max_wait, math.pow(1.5, min(attempt_num, 10) + 1))
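# Illustrative sketch (not part of the original module): ignoring the random
# quick-comeback path and assuming the default 60s cap, the sleep value grows
# roughly as 1.5**(attempt + 1) seconds:
#
#   >>> [round(min(60., 1.5 ** (min(n, 10) + 1)), 2) for n in (0, 1, 2, 5, 9, 10)]
#   [1.5, 2.25, 3.38, 11.39, 57.67, 60.0]
#
# so a bot backs off from ~1.5s up to the 60s ceiling after about ten attempts.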
def schedule_request(request, secret_bytes, check_acls=True):
"""Creates and stores all the entities to schedule a new task request.
Checks ACLs first. Raises auth.AuthorizationError if caller is not authorized
to post this request.
The number of entities created is 3: TaskRequest, TaskToRun and
TaskResultSummary.
  All 4 entities in the same entity group (TaskRequest, TaskToRun,
  TaskResultSummary, SecretBytes) are saved as a DB transaction.
  Arguments:
  - request: TaskRequest entity to be saved in the DB. Its key must not be set
  and the entity must not be saved in the DB yet.
  - secret_bytes: SecretBytes entity to be saved in the DB. Its key will be set
  and the entity will be stored by this function. None is allowed if
  there are no SecretBytes for this task.
- check_acls: Whether the request should check ACLs.
Returns:
TaskResultSummary. TaskToRun is not returned.
"""
assert isinstance(request, task_request.TaskRequest), request
assert not request.key, request.key
# Raises AuthorizationError with helpful message if the request.authorized
# can't use some of the requested dimensions.
if check_acls:
_check_dimension_acls(request)
# This does a DB GET, occasionally triggers a task queue. May throw, which is
# surfaced to the user but it is safe as the task request wasn't stored yet.
task_queues.assert_task(request)
now = utils.utcnow()
request.key = task_request.new_request_key()
task = task_to_run.new_task_to_run(request)
result_summary = task_result.new_result_summary(request)
result_summary.modified_ts = now
if secret_bytes:
secret_bytes.key = request.secret_bytes_key
def get_new_keys():
# Warning: this assumes knowledge about the hierarchy of each entity.
key = task_request.new_request_key()
task.key = ndb.Key(task.key.kind(), task.key.id(), parent=key)
if secret_bytes:
secret_bytes.key = ndb.Key(
secret_bytes.key.kind(), secret_bytes.key.id(), parent=key)
old = result_summary.task_id
result_summary.key = ndb.Key(
result_summary.key.kind(), result_summary.key.id(), parent=key)
logging.info('%s conflicted, using %s', old, result_summary.task_id)
return key
deduped = False
if request.properties.idempotent:
dupe_summary = _find_dupe_task(now, request.properties_hash)
if dupe_summary:
# Setting task.queue_number to None removes it from the scheduling.
task.queue_number = None
_copy_summary(
dupe_summary, result_summary,
('created_ts', 'modified_ts', 'name', 'user', 'tags'))
# Zap irrelevant properties. PerformanceStats is also not copied over,
# since it's not relevant.
result_summary.properties_hash = None
result_summary.try_number = 0
result_summary.cost_saved_usd = result_summary.cost_usd
# Only zap after.
result_summary.costs_usd = []
result_summary.deduped_from = task_pack.pack_run_result_key(
dupe_summary.run_result_key)
# In this code path, there's not much to do as the task will not be run,
# previous results are returned. We still need to store all the entities
# correctly. However, since the has_secret_bytes property is already set
# for UI purposes, and the task itself will never be run, we skip storing
# the SecretBytes, as they would never be read and will just consume space
# in the datastore (and the task we deduplicated with will have them
# stored anyway, if we really want to get them again).
datastore_utils.insert(
request, get_new_keys, extra=[task, result_summary])
logging.debug(
'New request %s reusing %s', result_summary.task_id,
dupe_summary.task_id)
deduped = True
if not deduped:
# Storing these entities makes this task live. It is important at this point
# that the HTTP handler returns as fast as possible, otherwise the task will
# be run but the client will not know about it.
datastore_utils.insert(request, get_new_keys,
extra=filter(bool, [task, result_summary, secret_bytes]))
logging.debug('New request %s', result_summary.task_id)
# Get parent task details if applicable.
if request.parent_task_id:
parent_run_key = task_pack.unpack_run_result_key(request.parent_task_id)
parent_task_keys = [
parent_run_key,
task_pack.run_result_key_to_result_summary_key(parent_run_key),
]
def run_parent():
# This one is slower.
items = ndb.get_multi(parent_task_keys)
k = result_summary.task_id
for item in items:
item.children_task_ids.append(k)
item.modified_ts = now
ndb.put_multi(items)
# Raising will abort to the caller. There's a risk that for tasks with
# parent tasks, the task will be lost due to this transaction.
# TODO(maruel): An option is to update the parent task as part of a cron
# job, which would remove this code from the critical path.
datastore_utils.transaction(run_parent)
ts_mon_metrics.update_jobs_requested_metrics(result_summary, deduped)
return result_summary
def bot_reap_task(bot_dimensions, bot_version, deadline):
"""Reaps a TaskToRun if one is available.
The process is to find a TaskToRun where its .queue_number is set, then
create a TaskRunResult for it.
Returns:
tuple of (TaskRequest, SecretBytes, TaskRunResult) for the task that was
reaped. The TaskToRun involved is not returned.
"""
start = time.time()
bot_id = bot_dimensions[u'id'][0]
iterated = 0
failures = 0
try:
q = task_to_run.yield_next_available_task_to_dispatch(
bot_dimensions, deadline)
for request, to_run in q:
iterated += 1
run_result, secret_bytes = _reap_task(
bot_dimensions, bot_version, to_run.key, request)
if not run_result:
failures += 1
        # The sad thing is that there is no way here to know the try number.
logging.info(
'failed to reap: %s0',
task_pack.pack_request_key(to_run.request_key))
continue
logging.info('Reaped: %s', run_result.task_id)
return request, secret_bytes, run_result
return None, None, None
finally:
logging.debug(
'bot_reap_task(%s) in %.3fs: %d iterated, %d failure',
bot_id, time.time()-start, iterated, failures)
def bot_update_task(
run_result_key, bot_id, output, output_chunk_start, exit_code, duration,
hard_timeout, io_timeout, cost_usd, outputs_ref, cipd_pins,
performance_stats):
"""Updates a TaskRunResult and TaskResultSummary, along TaskOutputChunk.
Arguments:
- run_result_key: ndb.Key to TaskRunResult.
- bot_id: Self advertised bot id to ensure it's the one expected.
- output: Data to append to this command output.
- output_chunk_start: Index of output in the stdout stream.
- exit_code: Mark that this task completed.
- duration: Time spent in seconds for this task, excluding overheads.
  - hard_timeout: Bool set if a hard timeout occurred.
  - io_timeout: Bool set if an I/O timeout occurred.
- cost_usd: Cost in $USD of this task up to now.
- outputs_ref: task_request.FilesRef instance or None.
- cipd_pins: None or task_result.CipdPins
- performance_stats: task_result.PerformanceStats instance or None. Can only
be set when the task is completing.
Invalid states, these are flat out refused:
  - A command is updated after it already had an exit code assigned.
Returns:
TaskRunResult.state or None in case of failure.
"""
assert output_chunk_start is None or isinstance(output_chunk_start, int)
assert output is None or isinstance(output, str)
if cost_usd is not None and cost_usd < 0.:
    raise ValueError('cost_usd must be None or greater than or equal to 0')
if duration is not None and duration < 0.:
    raise ValueError('duration must be None or greater than or equal to 0')
if (duration is None) != (exit_code is None):
raise ValueError(
'had unexpected duration; expected iff a command completes\n'
'duration: %r; exit: %r' % (duration, exit_code))
if performance_stats and duration is None:
raise ValueError(
'duration must be set when performance_stats is set\n'
'duration: %s; performance_stats: %s' %
(duration, performance_stats))
packed = task_pack.pack_run_result_key(run_result_key)
logging.debug(
'bot_update_task(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
packed, bot_id, len(output) if output else output, output_chunk_start,
exit_code, duration, hard_timeout, io_timeout, cost_usd, outputs_ref,
cipd_pins, performance_stats)
result_summary_key = task_pack.run_result_key_to_result_summary_key(
run_result_key)
request_key = task_pack.result_summary_key_to_request_key(result_summary_key)
request_future = request_key.get_async()
server_version = utils.get_app_version()
request = request_future.get_result()
now = utils.utcnow()
def run():
"""Returns tuple(TaskRunResult, bool(completed), str(error)).
Any error is returned as a string to be passed to logging.error() instead of
logging inside the transaction for performance.
"""
# 2 consecutive GETs, one PUT.
run_result_future = run_result_key.get_async()
result_summary_future = result_summary_key.get_async()
run_result = run_result_future.get_result()
if not run_result:
result_summary_future.wait()
return None, None, 'is missing'
if run_result.bot_id != bot_id:
result_summary_future.wait()
return None, None, (
'expected bot (%s) but had update from bot %s' % (
run_result.bot_id, bot_id))
if not run_result.started_ts:
return None, None, 'TaskRunResult is broken; %s' % (
run_result.to_dict())
# Assumptions:
# - duration and exit_code are both set or not set.
# - same for run_result.
if exit_code is not None:
if run_result.exit_code is not None:
        # This happens when an HTTP request is retried: the DB write succeeded
        # but the handler still returned HTTP 500.
if run_result.exit_code != exit_code:
result_summary_future.wait()
return None, None, 'got 2 different exit_code; %s then %s' % (
run_result.exit_code, exit_code)
if run_result.duration != duration:
result_summary_future.wait()
return None, None, 'got 2 different durations; %s then %s' % (
run_result.duration, duration)
else:
run_result.duration = duration
run_result.exit_code = exit_code
if outputs_ref:
run_result.outputs_ref = outputs_ref
if cipd_pins:
run_result.cipd_pins = cipd_pins
if run_result.state in task_result.State.STATES_RUNNING:
if hard_timeout or io_timeout:
run_result.state = task_result.State.TIMED_OUT
run_result.completed_ts = now
elif run_result.exit_code is not None:
run_result.state = task_result.State.COMPLETED
run_result.completed_ts = now
run_result.signal_server_version(server_version)
run_result.validate(request)
to_put = [run_result]
if output:
      # This does 1 multi GET. It also modifies run_result in place.
to_put.extend(run_result.append_output(output, output_chunk_start or 0))
if performance_stats:
performance_stats.key = task_pack.run_result_key_to_performance_stats_key(
run_result.key)
to_put.append(performance_stats)
run_result.cost_usd = max(cost_usd, run_result.cost_usd or 0.)
run_result.modified_ts = now
result_summary = result_summary_future.get_result()
if (result_summary.try_number and
result_summary.try_number > run_result.try_number):
      # In the situation where a shard is retried but the bot running the
      # previous try somehow reappears and reports success, the result must
      # still show the last try's result. We still need to update cost_usd
      # manually.
result_summary.costs_usd[run_result.try_number-1] = run_result.cost_usd
result_summary.modified_ts = now
else:
result_summary.set_from_run_result(run_result, request)
result_summary.validate(request)
to_put.append(result_summary)
ndb.put_multi(to_put)
return result_summary, run_result, None
try:
smry, run_result, error = datastore_utils.transaction(run)
except datastore_utils.CommitError as e:
logging.info('Got commit error: %s', e)
# It is important that the caller correctly surface this error.
return None
assert bool(error) != bool(run_result), (error, run_result)
if error:
logging.error('Task %s %s', packed, error)
return None
# Caller must retry if PubSub enqueue fails.
task_completed = run_result.state != task_result.State.RUNNING
if not _maybe_pubsub_notify_now(smry, request):
return None
if task_completed:
event_mon_metrics.send_task_event(smry)
ts_mon_metrics.update_jobs_completed_metrics(smry)
return run_result.state
def bot_kill_task(run_result_key, bot_id):
"""Terminates a task that is currently running as an internal failure.
Returns:
    An error message string on failure, None on success.
"""
result_summary_key = task_pack.run_result_key_to_result_summary_key(
run_result_key)
request = task_pack.result_summary_key_to_request_key(
result_summary_key).get()
server_version = utils.get_app_version()
now = utils.utcnow()
packed = task_pack.pack_run_result_key(run_result_key)
def run():
run_result, result_summary = ndb.get_multi(
(run_result_key, result_summary_key))
if bot_id and run_result.bot_id != bot_id:
return 'Bot %s sent task kill for task %s owned by bot %s' % (
bot_id, packed, run_result.bot_id)
if run_result.state == task_result.State.BOT_DIED:
# Ignore this failure.
return None
run_result.signal_server_version(server_version)
run_result.state = task_result.State.BOT_DIED
run_result.internal_failure = True
run_result.abandoned_ts = now
run_result.modified_ts = now
result_summary.set_from_run_result(run_result, None)
futures = ndb.put_multi_async((run_result, result_summary))
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return None
try:
msg = datastore_utils.transaction(run)
except datastore_utils.CommitError as e:
# At worst, the task will be tagged as BOT_DIED after BOT_PING_TOLERANCE
# seconds passed on the next cron_handle_bot_died cron job.
return 'Failed killing task %s: %s' % (packed, e)
return msg
def cancel_task(request, result_key):
"""Cancels a task if possible.
Ensures that the associated TaskToRun is canceled and updates the
TaskResultSummary/TaskRunResult accordingly.
Warning: ACL check must have been done before.
"""
to_run_key = task_to_run.request_to_task_to_run_key(request)
if result_key.kind() == 'TaskRunResult':
result_key = task_pack.run_result_key_to_result_summary_key(result_key)
now = utils.utcnow()
def run():
to_run, result_summary = ndb.get_multi((to_run_key, result_key))
was_running = result_summary.state == task_result.State.RUNNING
if not result_summary.can_be_canceled:
return False, was_running
to_run.queue_number = None
result_summary.state = task_result.State.CANCELED
result_summary.abandoned_ts = now
result_summary.modified_ts = now
futures = ndb.put_multi_async((to_run, result_summary))
_maybe_pubsub_notify_via_tq(result_summary, request)
for f in futures:
f.check_success()
return True, was_running
# Add it to the negative cache *before* running the transaction. Either way
# the task was already reaped or the task is correctly canceled thus not
# reapable.
task_to_run.set_lookup_cache(to_run_key, False)
try:
ok, was_running = datastore_utils.transaction(run)
except datastore_utils.CommitError as e:
packed = task_pack.pack_result_summary_key(result_key)
return 'Failed killing task %s: %s' % (packed, e)
# TODO(maruel): Add paper trail.
return ok, was_running
### Cron job.
def cron_abort_expired_task_to_run(host):
"""Aborts expired TaskToRun requests to execute a TaskRequest on a bot.
Three reasons can cause this situation:
  - Higher throughput of incoming task requests than the rate at which task
    requests are completed, e.g. there are not enough bots to run all the tasks
    that come in at the current rate. That's normal overflow and must be handled
    accordingly.
  - No bot connected that satisfies the requested dimensions. This is trickier:
    it is either a typo in the dimensions or all the bots died and the admins
    must reconnect them.
- Server has internal failures causing it to fail to either distribute the
tasks or properly receive results from the bots.
Returns:
Packed tasks ids of aborted tasks.
"""
killed = []
skipped = 0
try:
for to_run in task_to_run.yield_expired_task_to_run():
request = to_run.request_key.get()
if _expire_task(to_run.key, request):
# TODO(maruel): Know which try it is.
killed.append(request)
ts_mon_metrics.tasks_expired.increment(
fields=ts_mon_metrics.extract_job_fields(request.tags))
else:
# It's not a big deal, the bot will continue running.
skipped += 1
finally:
if killed:
logging.warning(
'EXPIRED!\n%d tasks:\n%s',
len(killed),
'\n'.join(
' %s/user/task/%s %s' % (host, i.task_id, i.properties.dimensions)
for i in killed))
    logging.info('Killed %d tasks, skipped %d', len(killed), skipped)
return [i.task_id for i in killed]
def cron_handle_bot_died(host):
"""Aborts or retry stale TaskRunResult where the bot stopped sending updates.
If the task was at its first try, it'll be retried. Otherwise the task will be
canceled.
Returns:
- task IDs killed
- number of task retried
- number of task ignored
"""
ignored = 0
killed = []
retried = 0
try:
for run_result_key in task_result.yield_run_result_keys_with_dead_bot():
result = _handle_dead_bot(run_result_key)
if result is True:
retried += 1
elif result is False:
killed.append(task_pack.pack_run_result_key(run_result_key))
else:
ignored += 1
finally:
if killed:
logging.error(
'BOT_DIED!\n%d tasks:\n%s',
len(killed),
'\n'.join(' %s/user/task/%s' % (host, i) for i in killed))
logging.info(
'Killed %d; retried %d; ignored: %d', len(killed), retried, ignored)
return killed, retried, ignored
## Task queue tasks.
def task_handle_pubsub_task(payload):
"""Handles task enqueued by _maybe_pubsub_notify_via_tq."""
  # Do not catch errors, so that the task queue retries the task. Errors should
  # not happen in the normal case.
_pubsub_notify(
payload['task_id'], payload['topic'],
payload['auth_token'], payload['userdata'])
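# Illustrative sketch (assumption, not part of the original module): the happy
# path through this API, as driven by the HTTP handlers, is roughly the
# following; all variable values are placeholders supplied by those handlers.
#
#   # 1. A client posts a task.
#   result_summary = schedule_request(request, secret_bytes)
#
#   # 2. A bot polls for work.
#   request, secret_bytes, run_result = bot_reap_task(
#       bot_dimensions, bot_version, deadline)
#
#   # 3. The bot streams output and finally reports the exit code.
#   state = bot_update_task(
#       run_result.key, bot_id, output, 0, exit_code, duration,
#       False, False, cost_usd, outputs_ref, cipd_pins, performance_stats)
#
# Expiration and dead-bot handling happen out of band via the cron entry
# points cron_abort_expired_task_to_run() and cron_handle_bot_died().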
| en | 0.908832 | # Copyright 2014 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. High level tasks execution scheduling API. This is the interface closest to the HTTP handlers. ### Private stuff. Converts a seconds value in float to the number of ms as an integer. Expires a TaskResultSummary and unschedules the TaskToRun. Returns: True on success. # Look if the TaskToRun is reapable once before doing the check inside the # transaction. This reduces the likelihood of failing this check inside the # transaction, which is an order of magnitude more costly. # 2 concurrent GET, one PUT. Optionally with an additional serialized GET. # It's a retry that is being expired. Keep the old state. That requires an # additional pipelined GET but that shouldn't be the common case. # Add it to the negative cache *before* running the transaction. Either way # the task was already reaped or the task is correctly expired and not # reapable. # It'll be caught by next cron job execution in case of failure. Reaps a task and insert the results entity. Returns: (TaskRunResult, SecretBytes) if successful, (None, None) otherwise. # Log before the task id in case the function fails in a bad state where the # DB TX ran but the reply never comes to the bot. This is the worst case as # this leads to a task that results in BOT_DIED without ever starting. This # case is specifically handled in cron_handle_bot_died(). # 3 GET, 1 PUT at the end. # This means two things, first it's a retry, second it's that the first # try failed and the retry is being reaped by the same bot. Deny that, as # the bot may be deeply broken and could be in a killing spree. # TODO(maruel): Allow retry for bot locked task using 'id' dimension. # Upon bot reap, both .started_ts and .modified_ts matches. They differ on # the first ping. # Add it to the negative cache *before* running the transaction. This will # inhibit concurrently readers to try to reap this task. The downside is if # this request fails in the middle of the transaction, the task may stay # unreapable for up to 15 seconds. # The challenge here is that the transaction may have failed because: # - The DB had an hickup and the TaskToRun, TaskRunResult and # TaskResultSummary haven't been updated. # - The entities had been updated by a concurrent transaction on another # handler so it was not reapable anyway. This does cause exceptions as # both GET returns the TaskToRun.queue_number != None but only one succeed # at the PUT. # # In the first case, we may want to reset the negative cache, while we don't # want to in the later case. The trade off are one of: # - negative cache is incorrectly set, so the task is not reapable for 15s # - resetting the negative cache would cause even more contention # # We chose the first one here for now, as the when the DB starts misbehaving # and the index becomes stale, it means the DB is *already* not in good # shape, so it is preferable to not put more stress on it, and skipping a # few tasks for 15s may even actively help the DB to stabilize. # The bot will reap the next available task in case of failure, no big deal. Handles TaskRunResult where its bot has stopped showing sign of life. Transactionally updates the entities depending on the state of this task. The task may be retried automatically, canceled or left alone. Returns: True if the task was retried, False if the task was killed, None if no action was done. 
Returns tuple(task_is_retried or None, bot_id). # Do one GET, one PUT at the end. # It was updated already or not updating last. Likely DB index was stale. # The query index IS stale. # Not updating correct run_result, cancel it without touching # result_summary. # Retry it. It fits: # - first try # - not yet expired # - One of: # - idempotent # - task hadn't got any ping at all from task_runner.run_command() # TODO(maruel): Allow retry for bot locked task using 'id' dimension. # Do not sync data from run_result to result_summary, since the task is # being retried. # Kill it as BOT_DIED, there was more than one try, the task expired in # the meantime or it wasn't idempotent. # if result_summary.state != orig_summary_state: # Remove it from the negative cache *before* running the transaction. Either # way the TaskToRun.queue_number was not set so there was no contention on # this entity. At best the task is reenqueued for a retry. Copies the attributes of entity src into dst. It doesn't copy the key nor any member in skip_list. # Access to a protected member _XX of a client class - pylint: disable=W0212 Examines result_summary and sends task completion PubSub message. Does it only if result_summary indicates a task in some finished state and the request is specifying pubsub topic. Returns False to trigger the retry (on transient errors), or True if retry is not needed (e.g. messages was sent successfully or fatal error happened). # do not retry it Examines result_summary and enqueues a task to send PubSub message. Must be called within a transaction. Raises CommitError on errors (to abort the transaction). Sends PubSub notification about task completion. Raises pubsub.TransientError on transient errors. Fatal errors are logged, but not retried. Raises AuthorizationError if some requested dimensions are forbidden. Uses 'dimension_acls' field from the settings. See proto/config.proto. # not configured, this is fine # see _validate_dimensions # see task_request.init_new_request # Forbid targeting individual bots for non-admins, but allow using 'id' if # 'pool' is used as well (so whoever can posts tasks to 'pool', can target an # individual bot in that pool). Returns True if 'dimension_acls' allow the given dimension to be used. Args: dim_acls: config_pb2.DimensionACLs message. ident: auth.Identity to check. k: dimension name. v: dimension value. # A dimension not mentioned in 'dimension_acls' is allowed by default. Finds a previously run task that is also idempotent and completed. Fetch items that can be used to dedupe the task. See the comment for this property for more details. Do not use "task_result.TaskResultSummary.created_ts > oldest" here because this would require a composite index. It's unnecessary because TaskRequest.key is equivalent to decreasing TaskRequest.created_ts, ordering by key works as well and doesn't require a composite index. # TODO(maruel): Make a reverse map on successful task completion so this # becomes a simple ndb.get(). # It is possible for the query to return stale items. # Indexes are very inconsistent, give up. # Refuse tasks older than X days. This is due to the isolate server # dropping files. # TODO(maruel): The value should be calculated from the isolate server # setting and be unbounded when no isolated input was used. ### Public API. Returns an exponential backoff value in seconds. # Randomly ask the bot to return quickly. # If the user provided a max then use it, otherwise use default 60s. 
Creates and stores all the entities to schedule a new task request. Checks ACLs first. Raises auth.AuthorizationError if caller is not authorized to post this request. The number of entities created is 3: TaskRequest, TaskToRun and TaskResultSummary. All 4 entities in the same entity group (TaskReqest, TaskToRun, TaskResultSummary, SecretBytes) are saved as a DB transaction. Arguments: - request: TaskRequest entity to be saved in the DB. It's key must not be set and the entity must not be saved in the DB yet. - secret_bytes: SecretBytes entity to be saved in the DB. It's key will be set and the entity will be stored by this function. None is allowed if there are no SecretBytes for this task. - check_acls: Whether the request should check ACLs. Returns: TaskResultSummary. TaskToRun is not returned. # Raises AuthorizationError with helpful message if the request.authorized # can't use some of the requested dimensions. # This does a DB GET, occasionally triggers a task queue. May throw, which is # surfaced to the user but it is safe as the task request wasn't stored yet. # Warning: this assumes knowledge about the hierarchy of each entity. # Setting task.queue_number to None removes it from the scheduling. # Zap irrelevant properties. PerformanceStats is also not copied over, # since it's not relevant. # Only zap after. # In this code path, there's not much to do as the task will not be run, # previous results are returned. We still need to store all the entities # correctly. However, since the has_secret_bytes property is already set # for UI purposes, and the task itself will never be run, we skip storing # the SecretBytes, as they would never be read and will just consume space # in the datastore (and the task we deduplicated with will have them # stored anyway, if we really want to get them again). # Storing these entities makes this task live. It is important at this point # that the HTTP handler returns as fast as possible, otherwise the task will # be run but the client will not know about it. # Get parent task details if applicable. # This one is slower. # Raising will abort to the caller. There's a risk that for tasks with # parent tasks, the task will be lost due to this transaction. # TODO(maruel): An option is to update the parent task as part of a cron # job, which would remove this code from the critical path. Reaps a TaskToRun if one is available. The process is to find a TaskToRun where its .queue_number is set, then create a TaskRunResult for it. Returns: tuple of (TaskRequest, SecretBytes, TaskRunResult) for the task that was reaped. The TaskToRun involved is not returned. # Sad thing is that there is not way here to know the try number. Updates a TaskRunResult and TaskResultSummary, along TaskOutputChunk. Arguments: - run_result_key: ndb.Key to TaskRunResult. - bot_id: Self advertised bot id to ensure it's the one expected. - output: Data to append to this command output. - output_chunk_start: Index of output in the stdout stream. - exit_code: Mark that this task completed. - duration: Time spent in seconds for this task, excluding overheads. - hard_timeout: Bool set if an hard timeout occured. - io_timeout: Bool set if an I/O timeout occured. - cost_usd: Cost in $USD of this task up to now. - outputs_ref: task_request.FilesRef instance or None. - cipd_pins: None or task_result.CipdPins - performance_stats: task_result.PerformanceStats instance or None. Can only be set when the task is completing. 
Invalid states, these are flat out refused: - A command is updated after it had an exit code assigned to. Returns: TaskRunResult.state or None in case of failure. Returns tuple(TaskRunResult, bool(completed), str(error)). Any error is returned as a string to be passed to logging.error() instead of logging inside the transaction for performance. # 2 consecutive GETs, one PUT. # Assumptions: # - duration and exit_code are both set or not set. # - same for run_result. # This happens as an HTTP request is retried when the DB write succeeded # but it still returned HTTP 500. # This does 1 multi GETs. This also modifies run_result in place. # The situation where a shard is retried but the bot running the previous # try somehow reappears and reports success, the result must still show # the last try's result. We still need to update cost_usd manually. # It is important that the caller correctly surface this error. # Caller must retry if PubSub enqueue fails. Terminates a task that is currently running as an internal failure. Returns: str if an error message. # Ignore this failure. # At worst, the task will be tagged as BOT_DIED after BOT_PING_TOLERANCE # seconds passed on the next cron_handle_bot_died cron job. Cancels a task if possible. Ensures that the associated TaskToRun is canceled and updates the TaskResultSummary/TaskRunResult accordingly. Warning: ACL check must have been done before. # Add it to the negative cache *before* running the transaction. Either way # the task was already reaped or the task is correctly canceled thus not # reapable. # TODO(maruel): Add paper trail. ### Cron job. Aborts expired TaskToRun requests to execute a TaskRequest on a bot. Three reasons can cause this situation: - Higher throughput of task requests incoming than the rate task requests being completed, e.g. there's not enough bots to run all the tasks that gets in at the current rate. That's normal overflow and must be handled accordingly. - No bot connected that satisfies the requested dimensions. This is trickier, it is either a typo in the dimensions or bots all died and the admins must reconnect them. - Server has internal failures causing it to fail to either distribute the tasks or properly receive results from the bots. Returns: Packed tasks ids of aborted tasks. # TODO(maruel): Know which try it is. # It's not a big deal, the bot will continue running. Aborts or retry stale TaskRunResult where the bot stopped sending updates. If the task was at its first try, it'll be retried. Otherwise the task will be canceled. Returns: - task IDs killed - number of task retried - number of task ignored ## Task queue tasks. Handles task enqueued by _maybe_pubsub_notify_via_tq. # Do not catch errors to trigger task queue task retry. Errors should not # happen in normal case. | 2.052392 | 2 |
pororo/models/brainOCR/modules/feature_extraction.py | jayten42/pororo | 1,137 | 6630142 | import torch.nn as nn
class VGGFeatureExtractor(nn.Module):
""" FeatureExtractor of CRNN (https://arxiv.org/pdf/1507.05717.pdf) """
def __init__(self,
n_input_channels: int = 1,
n_output_channels: int = 512,
opt2val=None):
super(VGGFeatureExtractor, self).__init__()
self.output_channel = [
int(n_output_channels / 8),
int(n_output_channels / 4),
int(n_output_channels / 2),
n_output_channels,
] # [64, 128, 256, 512]
rec_model_ckpt_fp = opt2val["rec_model_ckpt_fp"]
if "baseline" in rec_model_ckpt_fp:
self.ConvNet = nn.Sequential(
nn.Conv2d(n_input_channels, self.output_channel[0], 3, 1, 1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64x16x50
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 128x8x25
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1,
1),
nn.ReLU(True), # 256x8x25
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
nn.Conv2d(self.output_channel[2],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True), # 512x4x25
nn.Conv2d(self.output_channel[3],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
# nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
nn.ConvTranspose2d(self.output_channel[3],
self.output_channel[3], 2, 2),
nn.ReLU(True),
) # 512x4x50
else:
self.ConvNet = nn.Sequential(
nn.Conv2d(n_input_channels, self.output_channel[0], 3, 1, 1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64x16x50
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 128x8x25
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1,
1),
nn.ReLU(True), # 256x8x25
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
nn.Conv2d(self.output_channel[2],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True), # 512x4x25
nn.Conv2d(self.output_channel[3],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
# nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
nn.ConvTranspose2d(self.output_channel[3],
self.output_channel[3], 2, 2),
nn.ReLU(True), # 512x4x50
nn.ConvTranspose2d(self.output_channel[3],
self.output_channel[3], 2, 2),
nn.ReLU(True),
) # 512x4x50
def forward(self, x):
return self.ConvNet(x)
class ResNetFeatureExtractor(nn.Module):
"""
FeatureExtractor of FAN
(http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf)
"""
def __init__(self, n_input_channels: int = 1, n_output_channels: int = 512):
super(ResNetFeatureExtractor, self).__init__()
self.ConvNet = ResNet(n_input_channels, n_output_channels, BasicBlock,
[1, 2, 5, 3])
def forward(self, inputs):
return self.ConvNet(inputs)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = self._conv3x3(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = self._conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def _conv3x3(self, in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
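# Illustrative sketch (not part of the original file): with stride 1 and no
# downsample, a BasicBlock is shape-preserving, so a quick sanity check could
# look like this (the channel count and spatial size are arbitrary):
#
#   import torch
#   block = BasicBlock(inplanes=64, planes=64)
#   feats = torch.randn(1, 64, 8, 25)
#   assert block(feats).shape == feats.shape  # residual add keeps (1, 64, 8, 25)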
class ResNet(nn.Module):
def __init__(self, n_input_channels: int, n_output_channels: int, block,
layers):
"""
:param n_input_channels (int): The number of input channels of the feature extractor
:param n_output_channels (int): The number of output channels of the feature extractor
:param block:
:param layers:
"""
super(ResNet, self).__init__()
self.output_channel_blocks = [
int(n_output_channels / 4),
int(n_output_channels / 2),
n_output_channels,
n_output_channels,
]
self.inplanes = int(n_output_channels / 8)
self.conv0_1 = nn.Conv2d(
n_input_channels,
int(n_output_channels / 16),
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn0_1 = nn.BatchNorm2d(int(n_output_channels / 16))
self.conv0_2 = nn.Conv2d(
int(n_output_channels / 16),
self.inplanes,
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn0_2 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer1 = self._make_layer(block, self.output_channel_blocks[0],
layers[0])
self.conv1 = nn.Conv2d(
self.output_channel_blocks[0],
self.output_channel_blocks[0],
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(self.output_channel_blocks[0])
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer2 = self._make_layer(block,
self.output_channel_blocks[1],
layers[1],
stride=1)
self.conv2 = nn.Conv2d(
self.output_channel_blocks[1],
self.output_channel_blocks[1],
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn2 = nn.BatchNorm2d(self.output_channel_blocks[1])
self.maxpool3 = nn.MaxPool2d(kernel_size=2,
stride=(2, 1),
padding=(0, 1))
self.layer3 = self._make_layer(block,
self.output_channel_blocks[2],
layers[2],
stride=1)
self.conv3 = nn.Conv2d(
self.output_channel_blocks[2],
self.output_channel_blocks[2],
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn3 = nn.BatchNorm2d(self.output_channel_blocks[2])
self.layer4 = self._make_layer(block,
self.output_channel_blocks[3],
layers[3],
stride=1)
self.conv4_1 = nn.Conv2d(
self.output_channel_blocks[3],
self.output_channel_blocks[3],
kernel_size=2,
stride=(2, 1),
padding=(0, 1),
bias=False,
)
self.bn4_1 = nn.BatchNorm2d(self.output_channel_blocks[3])
self.conv4_2 = nn.Conv2d(
self.output_channel_blocks[3],
self.output_channel_blocks[3],
kernel_size=2,
stride=1,
padding=0,
bias=False,
)
self.bn4_2 = nn.BatchNorm2d(self.output_channel_blocks[3])
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0_1(x)
x = self.bn0_1(x)
x = self.relu(x)
x = self.conv0_2(x)
x = self.bn0_2(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer4(x)
x = self.conv4_1(x)
x = self.bn4_1(x)
x = self.relu(x)
x = self.conv4_2(x)
x = self.bn4_2(x)
x = self.relu(x)
return x
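# Illustrative sketch (assumption, not from the original file): running a dummy
# grayscale batch through the extractors shows the feature-map shapes the
# comments above refer to. The opt2val dict is a stand-in; pororo builds the
# real one elsewhere, and only the "baseline" substring matters here.
#
#   import torch
#   x = torch.randn(2, 1, 32, 100)  # (batch, channel, height, width)
#   vgg = VGGFeatureExtractor(1, 512, opt2val={"rec_model_ckpt_fp": "baseline.ckpt"})
#   print(vgg(x).shape)     # torch.Size([2, 512, 4, 50]), the "512x4x50" map
#   resnet = ResNetFeatureExtractor(1, 512)
#   print(resnet(x).shape)  # height collapses to 1, roughly torch.Size([2, 512, 1, 26])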
| import torch.nn as nn
class VGGFeatureExtractor(nn.Module):
""" FeatureExtractor of CRNN (https://arxiv.org/pdf/1507.05717.pdf) """
def __init__(self,
n_input_channels: int = 1,
n_output_channels: int = 512,
opt2val=None):
super(VGGFeatureExtractor, self).__init__()
self.output_channel = [
int(n_output_channels / 8),
int(n_output_channels / 4),
int(n_output_channels / 2),
n_output_channels,
] # [64, 128, 256, 512]
rec_model_ckpt_fp = opt2val["rec_model_ckpt_fp"]
if "baseline" in rec_model_ckpt_fp:
self.ConvNet = nn.Sequential(
nn.Conv2d(n_input_channels, self.output_channel[0], 3, 1, 1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64x16x50
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 128x8x25
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1,
1),
nn.ReLU(True), # 256x8x25
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
nn.Conv2d(self.output_channel[2],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True), # 512x4x25
nn.Conv2d(self.output_channel[3],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
# nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
nn.ConvTranspose2d(self.output_channel[3],
self.output_channel[3], 2, 2),
nn.ReLU(True),
) # 512x4x50
else:
self.ConvNet = nn.Sequential(
nn.Conv2d(n_input_channels, self.output_channel[0], 3, 1, 1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64x16x50
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 128x8x25
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1,
1),
nn.ReLU(True), # 256x8x25
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1,
1),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
nn.Conv2d(self.output_channel[2],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True), # 512x4x25
nn.Conv2d(self.output_channel[3],
self.output_channel[3],
3,
1,
1,
bias=False),
nn.BatchNorm2d(self.output_channel[3]),
nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
# nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
nn.ConvTranspose2d(self.output_channel[3],
self.output_channel[3], 2, 2),
nn.ReLU(True), # 512x4x50
nn.ConvTranspose2d(self.output_channel[3],
self.output_channel[3], 2, 2),
nn.ReLU(True),
) # 512x4x50
def forward(self, x):
return self.ConvNet(x)
class ResNetFeatureExtractor(nn.Module):
"""
FeatureExtractor of FAN
(http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf)
"""
def __init__(self, n_input_channels: int = 1, n_output_channels: int = 512):
super(ResNetFeatureExtractor, self).__init__()
self.ConvNet = ResNet(n_input_channels, n_output_channels, BasicBlock,
[1, 2, 5, 3])
def forward(self, inputs):
return self.ConvNet(inputs)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = self._conv3x3(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = self._conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def _conv3x3(self, in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, n_input_channels: int, n_output_channels: int, block,
layers):
"""
:param n_input_channels (int): The number of input channels of the feature extractor
:param n_output_channels (int): The number of output channels of the feature extractor
:param block:
:param layers:
"""
super(ResNet, self).__init__()
self.output_channel_blocks = [
int(n_output_channels / 4),
int(n_output_channels / 2),
n_output_channels,
n_output_channels,
]
self.inplanes = int(n_output_channels / 8)
self.conv0_1 = nn.Conv2d(
n_input_channels,
int(n_output_channels / 16),
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn0_1 = nn.BatchNorm2d(int(n_output_channels / 16))
self.conv0_2 = nn.Conv2d(
int(n_output_channels / 16),
self.inplanes,
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn0_2 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer1 = self._make_layer(block, self.output_channel_blocks[0],
layers[0])
self.conv1 = nn.Conv2d(
self.output_channel_blocks[0],
self.output_channel_blocks[0],
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(self.output_channel_blocks[0])
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer2 = self._make_layer(block,
self.output_channel_blocks[1],
layers[1],
stride=1)
self.conv2 = nn.Conv2d(
self.output_channel_blocks[1],
self.output_channel_blocks[1],
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn2 = nn.BatchNorm2d(self.output_channel_blocks[1])
self.maxpool3 = nn.MaxPool2d(kernel_size=2,
stride=(2, 1),
padding=(0, 1))
self.layer3 = self._make_layer(block,
self.output_channel_blocks[2],
layers[2],
stride=1)
self.conv3 = nn.Conv2d(
self.output_channel_blocks[2],
self.output_channel_blocks[2],
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn3 = nn.BatchNorm2d(self.output_channel_blocks[2])
self.layer4 = self._make_layer(block,
self.output_channel_blocks[3],
layers[3],
stride=1)
self.conv4_1 = nn.Conv2d(
self.output_channel_blocks[3],
self.output_channel_blocks[3],
kernel_size=2,
stride=(2, 1),
padding=(0, 1),
bias=False,
)
self.bn4_1 = nn.BatchNorm2d(self.output_channel_blocks[3])
self.conv4_2 = nn.Conv2d(
self.output_channel_blocks[3],
self.output_channel_blocks[3],
kernel_size=2,
stride=1,
padding=0,
bias=False,
)
self.bn4_2 = nn.BatchNorm2d(self.output_channel_blocks[3])
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0_1(x)
x = self.bn0_1(x)
x = self.relu(x)
x = self.conv0_2(x)
x = self.bn0_2(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer4(x)
x = self.conv4_1(x)
x = self.bn4_1(x)
x = self.relu(x)
x = self.conv4_2(x)
x = self.bn4_2(x)
x = self.relu(x)
return x
| en | 0.385396 | FeatureExtractor of CRNN (https://arxiv.org/pdf/1507.05717.pdf) # [64, 128, 256, 512] # 64x16x50 # 128x8x25 # 256x8x25 # 256x4x25 # 512x4x25 # 512x2x25 # nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24 # 512x4x50 # 64x16x50 # 128x8x25 # 256x8x25 # 256x4x25 # 512x4x25 # 512x2x25 # nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24 # 512x4x50 # 512x4x50 FeatureExtractor of FAN (http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf) :param n_input_channels (int): The number of input channels of the feature extractor :param n_output_channels (int): The number of output channels of the feature extractor :param block: :param layers: | 2.5615 | 3 |
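The extractor above is the CNN backbone of a CRNN/FAN-style text recognizer: it squeezes the image height down to 1 while keeping a long horizontal (time) axis for the sequence model that follows. A minimal usage sketch, with a stand-in BasicBlock that mirrors the partial residual block at the top of this file; the class name, the [1, 2, 5, 3] layer counts and the 32x100 input size are illustrative assumptions, not taken from this excerpt.
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
    # Stand-in residual block (assumption); the real one lives earlier in the file.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            residual = self.downsample(x)
        return self.relu(out + residual)
extractor = ResNet(1, 512, BasicBlock, layers=[1, 2, 5, 3])  # layer counts are an assumption
images = torch.randn(4, 1, 32, 100)   # batch of grayscale 32x100 text-line crops
features = extractor(images)
# The stride-(2, 1) pools/convs collapse the height to 1 while keeping a long width axis,
# e.g. torch.Size([4, 512, 1, 26]) for this input, ready for a BiLSTM/attention head.
print(features.shape)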
lambeq/text2diagram/ccg_parser.py | CQCL/lambeq | 131 | 6630143 | # Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
__all__ = ['CCGParser']
import sys
from abc import abstractmethod
from typing import Any, Optional
from discopy import Diagram
from tqdm.autonotebook import tqdm
from lambeq.core.globals import VerbosityLevel
from lambeq.core.utils import (SentenceBatchType, SentenceType,
tokenised_sentence_type_check)
from lambeq.text2diagram.base import Reader
from lambeq.text2diagram.ccg_tree import CCGTree
class CCGParser(Reader):
"""Base class for CCG parsers."""
verbose = VerbosityLevel.SUPPRESS.value
@abstractmethod
def __init__(self,
verbose: str = VerbosityLevel.SUPPRESS.value,
**kwargs: Any) -> None:
"""Initialise the CCG parser."""
@abstractmethod
def sentences2trees(
self,
sentences: SentenceBatchType,
tokenised: bool = False,
suppress_exceptions: bool = False,
verbose: Optional[str] = None) -> list[Optional[CCGTree]]:
"""Parse multiple sentences into a list of :py:class:`.CCGTree` s.
Parameters
----------
sentences : list of str, or list of list of str
The sentences to be parsed, passed either as strings or as lists
of tokens.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if a
sentence fails to parse, instead of raising an exception,
its return entry is :py:obj:`None`.
tokenised : bool, default: False
Whether each sentence has been passed as a list of tokens.
verbose : str, optional
See :py:class:`VerbosityLevel` for options. Not all parsers
implement all three levels of progress reporting, see the
respective documentation for each parser. If set, takes priority
over the :py:attr:`verbose` attribute of the parser.
Returns
-------
list of CCGTree or None
The parsed trees. May contain :py:obj:`None` if exceptions
are suppressed.
"""
def sentence2tree(self,
sentence: SentenceType,
tokenised: bool = False,
suppress_exceptions: bool = False) -> Optional[CCGTree]:
"""Parse a sentence into a :py:class:`.CCGTree`.
Parameters
----------
sentence : str, list[str]
The sentence to be parsed, passed either as a string, or as a list
of tokens.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if
the sentence fails to parse, instead of raising an
exception, returns :py:obj:`None`.
tokenised : bool, default: False
Whether the sentence has been passed as a list of tokens.
Returns
-------
CCGTree or None
The parsed tree, or :py:obj:`None` on failure.
"""
if tokenised:
if not tokenised_sentence_type_check(sentence):
raise ValueError('`tokenised` set to `True`, but variable '
'`sentence` does not have type '
'`list[str]`.')
sent: list[str] = [str(token) for token in sentence]
return self.sentences2trees(
[sent],
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
else:
if not isinstance(sentence, str):
raise ValueError('`tokenised` set to `False`, but variable '
'`sentence` does not have type `str`.')
return self.sentences2trees(
[sentence],
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
def sentences2diagrams(
self,
sentences: SentenceBatchType,
tokenised: bool = False,
planar: bool = False,
suppress_exceptions: bool = False,
verbose: Optional[str] = None) -> list[Optional[Diagram]]:
"""Parse multiple sentences into a list of discopy diagrams.
Parameters
----------
sentences : list of str, or list of list of str
The sentences to be parsed.
planar : bool, default: False
Force diagrams to be planar when they contain
crossed composition.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if a
sentence fails to parse, instead of raising an exception,
its return entry is :py:obj:`None`.
tokenised : bool, default: False
Whether each sentence has been passed as a list of tokens.
verbose : str, optional
See :py:class:`VerbosityLevel` for options. Not all parsers
implement all three levels of progress reporting, see the
respective documentation for each parser. If set, takes priority
over the :py:attr:`verbose` attribute of the parser.
Returns
-------
list of discopy.Diagram or None
The parsed diagrams. May contain :py:obj:`None` if
exceptions are suppressed.
"""
trees = self.sentences2trees(sentences,
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=verbose)
diagrams = []
if verbose is None:
verbose = self.verbose
        if verbose == VerbosityLevel.TEXT.value:
print('Turning parse trees to diagrams.', file=sys.stderr)
for tree in tqdm(
trees,
desc='Parse trees to diagrams',
leave=False,
disable=verbose != VerbosityLevel.PROGRESS.value):
if tree is not None:
try:
diagrams.append(tree.to_diagram(planar=planar))
except Exception as e:
if suppress_exceptions:
diagrams.append(None)
else:
raise e
else:
diagrams.append(None)
return diagrams
def sentence2diagram(
self,
sentence: SentenceType,
tokenised: bool = False,
planar: bool = False,
suppress_exceptions: bool = False) -> Optional[Diagram]:
"""Parse a sentence into a DisCoPy diagram.
Parameters
----------
sentence : str or list of str
The sentence to be parsed.
planar : bool, default: False
Force diagrams to be planar when they contain
crossed composition.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if
the sentence fails to parse, instead of raising an
exception, returns :py:obj:`None`.
tokenised : bool, default: False
Whether the sentence has been passed as a list of tokens.
Returns
-------
discopy.Diagram or None
The parsed diagram, or :py:obj:`None` on failure.
"""
if tokenised:
if not tokenised_sentence_type_check(sentence):
raise ValueError('`tokenised` set to `True`, but variable '
'`sentence` does not have type '
'`list[str]`.')
sent: list[str] = [str(token) for token in sentence]
return self.sentences2diagrams(
[sent],
planar=planar,
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
else:
if not isinstance(sentence, str):
raise ValueError('`tokenised` set to `False`, but variable '
'`sentence` does not have type `str`.')
return self.sentences2diagrams(
[sentence],
planar=planar,
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
| # Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
__all__ = ['CCGParser']
import sys
from abc import abstractmethod
from typing import Any, Optional
from discopy import Diagram
from tqdm.autonotebook import tqdm
from lambeq.core.globals import VerbosityLevel
from lambeq.core.utils import (SentenceBatchType, SentenceType,
tokenised_sentence_type_check)
from lambeq.text2diagram.base import Reader
from lambeq.text2diagram.ccg_tree import CCGTree
class CCGParser(Reader):
"""Base class for CCG parsers."""
verbose = VerbosityLevel.SUPPRESS.value
@abstractmethod
def __init__(self,
verbose: str = VerbosityLevel.SUPPRESS.value,
**kwargs: Any) -> None:
"""Initialise the CCG parser."""
@abstractmethod
def sentences2trees(
self,
sentences: SentenceBatchType,
tokenised: bool = False,
suppress_exceptions: bool = False,
verbose: Optional[str] = None) -> list[Optional[CCGTree]]:
"""Parse multiple sentences into a list of :py:class:`.CCGTree` s.
Parameters
----------
sentences : list of str, or list of list of str
The sentences to be parsed, passed either as strings or as lists
of tokens.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if a
sentence fails to parse, instead of raising an exception,
its return entry is :py:obj:`None`.
tokenised : bool, default: False
Whether each sentence has been passed as a list of tokens.
verbose : str, optional
See :py:class:`VerbosityLevel` for options. Not all parsers
implement all three levels of progress reporting, see the
respective documentation for each parser. If set, takes priority
over the :py:attr:`verbose` attribute of the parser.
Returns
-------
list of CCGTree or None
The parsed trees. May contain :py:obj:`None` if exceptions
are suppressed.
"""
def sentence2tree(self,
sentence: SentenceType,
tokenised: bool = False,
suppress_exceptions: bool = False) -> Optional[CCGTree]:
"""Parse a sentence into a :py:class:`.CCGTree`.
Parameters
----------
sentence : str, list[str]
The sentence to be parsed, passed either as a string, or as a list
of tokens.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if
the sentence fails to parse, instead of raising an
exception, returns :py:obj:`None`.
tokenised : bool, default: False
Whether the sentence has been passed as a list of tokens.
Returns
-------
CCGTree or None
The parsed tree, or :py:obj:`None` on failure.
"""
if tokenised:
if not tokenised_sentence_type_check(sentence):
raise ValueError('`tokenised` set to `True`, but variable '
'`sentence` does not have type '
'`list[str]`.')
sent: list[str] = [str(token) for token in sentence]
return self.sentences2trees(
[sent],
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
else:
if not isinstance(sentence, str):
raise ValueError('`tokenised` set to `False`, but variable '
'`sentence` does not have type `str`.')
return self.sentences2trees(
[sentence],
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
def sentences2diagrams(
self,
sentences: SentenceBatchType,
tokenised: bool = False,
planar: bool = False,
suppress_exceptions: bool = False,
verbose: Optional[str] = None) -> list[Optional[Diagram]]:
"""Parse multiple sentences into a list of discopy diagrams.
Parameters
----------
sentences : list of str, or list of list of str
The sentences to be parsed.
planar : bool, default: False
Force diagrams to be planar when they contain
crossed composition.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if a
sentence fails to parse, instead of raising an exception,
its return entry is :py:obj:`None`.
tokenised : bool, default: False
Whether each sentence has been passed as a list of tokens.
verbose : str, optional
See :py:class:`VerbosityLevel` for options. Not all parsers
implement all three levels of progress reporting, see the
respective documentation for each parser. If set, takes priority
over the :py:attr:`verbose` attribute of the parser.
Returns
-------
list of discopy.Diagram or None
The parsed diagrams. May contain :py:obj:`None` if
exceptions are suppressed.
"""
trees = self.sentences2trees(sentences,
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=verbose)
diagrams = []
if verbose is None:
verbose = self.verbose
        if verbose == VerbosityLevel.TEXT.value:
print('Turning parse trees to diagrams.', file=sys.stderr)
for tree in tqdm(
trees,
desc='Parse trees to diagrams',
leave=False,
disable=verbose != VerbosityLevel.PROGRESS.value):
if tree is not None:
try:
diagrams.append(tree.to_diagram(planar=planar))
except Exception as e:
if suppress_exceptions:
diagrams.append(None)
else:
raise e
else:
diagrams.append(None)
return diagrams
def sentence2diagram(
self,
sentence: SentenceType,
tokenised: bool = False,
planar: bool = False,
suppress_exceptions: bool = False) -> Optional[Diagram]:
"""Parse a sentence into a DisCoPy diagram.
Parameters
----------
sentence : str or list of str
The sentence to be parsed.
planar : bool, default: False
Force diagrams to be planar when they contain
crossed composition.
suppress_exceptions : bool, default: False
Whether to suppress exceptions. If :py:obj:`True`, then if
the sentence fails to parse, instead of raising an
exception, returns :py:obj:`None`.
tokenised : bool, default: False
Whether the sentence has been passed as a list of tokens.
Returns
-------
discopy.Diagram or None
The parsed diagram, or :py:obj:`None` on failure.
"""
if tokenised:
if not tokenised_sentence_type_check(sentence):
raise ValueError('`tokenised` set to `True`, but variable '
'`sentence` does not have type '
'`list[str]`.')
sent: list[str] = [str(token) for token in sentence]
return self.sentences2diagrams(
[sent],
planar=planar,
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
else:
if not isinstance(sentence, str):
raise ValueError('`tokenised` set to `False`, but variable '
'`sentence` does not have type `str`.')
return self.sentences2diagrams(
[sentence],
planar=planar,
suppress_exceptions=suppress_exceptions,
tokenised=tokenised,
verbose=VerbosityLevel.SUPPRESS.value)[0]
| en | 0.710608 | # Copyright 2021, 2022 Cambridge Quantum Computing Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Base class for CCG parsers. Initialise the CCG parser. Parse multiple sentences into a list of :py:class:`.CCGTree` s. Parameters ---------- sentences : list of str, or list of list of str The sentences to be parsed, passed either as strings or as lists of tokens. suppress_exceptions : bool, default: False Whether to suppress exceptions. If :py:obj:`True`, then if a sentence fails to parse, instead of raising an exception, its return entry is :py:obj:`None`. tokenised : bool, default: False Whether each sentence has been passed as a list of tokens. verbose : str, optional See :py:class:`VerbosityLevel` for options. Not all parsers implement all three levels of progress reporting, see the respective documentation for each parser. If set, takes priority over the :py:attr:`verbose` attribute of the parser. Returns ------- list of CCGTree or None The parsed trees. May contain :py:obj:`None` if exceptions are suppressed. Parse a sentence into a :py:class:`.CCGTree`. Parameters ---------- sentence : str, list[str] The sentence to be parsed, passed either as a string, or as a list of tokens. suppress_exceptions : bool, default: False Whether to suppress exceptions. If :py:obj:`True`, then if the sentence fails to parse, instead of raising an exception, returns :py:obj:`None`. tokenised : bool, default: False Whether the sentence has been passed as a list of tokens. Returns ------- CCGTree or None The parsed tree, or :py:obj:`None` on failure. Parse multiple sentences into a list of discopy diagrams. Parameters ---------- sentences : list of str, or list of list of str The sentences to be parsed. planar : bool, default: False Force diagrams to be planar when they contain crossed composition. suppress_exceptions : bool, default: False Whether to suppress exceptions. If :py:obj:`True`, then if a sentence fails to parse, instead of raising an exception, its return entry is :py:obj:`None`. tokenised : bool, default: False Whether each sentence has been passed as a list of tokens. verbose : str, optional See :py:class:`VerbosityLevel` for options. Not all parsers implement all three levels of progress reporting, see the respective documentation for each parser. If set, takes priority over the :py:attr:`verbose` attribute of the parser. Returns ------- list of discopy.Diagram or None The parsed diagrams. May contain :py:obj:`None` if exceptions are suppressed. Parse a sentence into a DisCoPy diagram. Parameters ---------- sentence : str or list of str The sentence to be parsed. planar : bool, default: False Force diagrams to be planar when they contain crossed composition. suppress_exceptions : bool, default: False Whether to suppress exceptions. If :py:obj:`True`, then if the sentence fails to parse, instead of raising an exception, returns :py:obj:`None`. tokenised : bool, default: False Whether the sentence has been passed as a list of tokens. 
Returns ------- discopy.Diagram or None The parsed diagram, or :py:obj:`None` on failure. | 2.306332 | 2 |
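CCGParser is abstract, so in practice one of lambeq's concrete parsers is driven through the interface defined above. A hedged usage sketch; it assumes lambeq's BobcatParser backend is installed and that the example sentences parse successfully.
from lambeq import BobcatParser  # concrete CCGParser subclass (assumed installed)
parser = BobcatParser()          # downloads its pretrained model on first use
# One sentence in, one DisCoPy diagram out.
diagram = parser.sentence2diagram('John gave Mary a flower')
# A batch of pre-tokenised sentences; failed parses come back as None here.
sentences = [['John', 'walks'], ['Mary', 'reads', 'a', 'book']]
diagrams = parser.sentences2diagrams(sentences, tokenised=True,
                                     suppress_exceptions=True)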
problems/max_points_in_circle.py | panc86/coding-problems | 0 | 6630144 | <gh_stars>0
# Problem
# There are N given points (0 to N-1) on a plane. The K-th point is located at coordinates (X[K], Y[K]) and its tag is S[K]. We want to draw a circle centered on coordinates (0, 0). The circle should not contain two points with the same tag. What is the maximum number of points that can lie inside the circle?
# Write a function that, given a string S of length N and two arrays X, Y consisting of N integers each, returns the maximum number of points inside the circle. The circle may contain only points with distinct tags and must be centered on coordinates (0, 0). Points that are on the border of the circle are included within it.
# Examples
# 1: Given S = 'ABDCA', X = [2, -1, -4, -3, 3], Y = [2, -2, 4, 1, -3], the function should return 3. There are three points that can be included in the circle: ('A', 2, 2), ('B', -1, -2), ('C', -3, 1). The next point ('A', 3, -3) has the same tag as ('A', 2, 2), so it cannot be included.
# 2: Given S = 'ABB', X = [1, -2, -2], Y = [1, -2, 2], the function should return 1. There are two points that cannot be included in the circle: ('B', -2, -2), ('B', -2, 2). They both have the tag 'B' and the same distance from coordinates (0, 0).
# 3: Given S = 'CCD', X = [1, -1, 2], Y = [1, -1, 2], the function should return 0. The points with tag 'C' have the same distance from coordinates (0, 0). (both C's exclude themselves leaving the circle without points)
# Solution
# 1. compute distances
# 2. find max hypotenuse `sqrt(a^2 + b^2)`
# 3. break collection when duplicate tag is found
# 4. remove outliers from result
import math
def hypotenuse(x, y):
return math.sqrt(abs(x)**2 + abs(y)**2)
def remove_outliers(points, threshold):
outliers = [t for t, r in points.items() if r > threshold]
for outlier in outliers:
del points[outlier]
def solution(S, X, Y):
points = dict()
for tag, x, y in zip(S, X, Y):
h = hypotenuse(x, y)
# stop if tag exists
if tag in points:
break
# assign hypotenuse to tag
points[tag] = h
# remove anything further than h
remove_outliers(points, h)
# number of unique points in circle
result = len(points)
return result if result > 1 else 0
| # Problem
# There are N given points (0 to N-1) on a plane. The K-th point is located at coordinates (X[K], Y[K]) and its tag is S[K]. We want to draw a circle centered on coordinates (0, 0). The circle should not contain two points with the same tag. What is the maximum number of points that can lie inside the circle?
# Write a function that, given a string S of length N and two arrays X, Y consisting of N integers each, returns the maximum number of points inside the circle. The circle may contain only points with distinct tags and must be centered on coordinates (0, 0). Points that are on the border of the circle are included within it.
# Examples
# 1: Given S = 'ABDCA', X = [2, -1, -4, -3, 3], Y = [2, -2, 4, 1, -3], the function should return 3. There are three points that can be included in the circle: ('A', 2, 2), ('B', -1, -2), ('C', -3, 1). The next point ('A', 3, -3) has the same tag as ('A', 2, 2), so it cannot be included.
# 2: Given S = 'ABB', X = [1, -2, -2], Y = [1, -2, 2], the function should return 1. There are two points that cannot be included in the circle: ('B', -2, -2), ('B', -2, 2). They both have the tag 'B' and the same distance from coordinates (0, 0).
# 3: Given S = 'CCD', X = [1, -1, 2], Y = [1, -1, 2], the function should return 0. The points with tag 'C' have the same distance from coordinates (0, 0). (both C's exclude themselves leaving the circle without points)
# Solution
# 1. compute distances
# 2. find max hypotenuse `sqrt(a^2 + b^2)`
# 3. break collection when duplicate tag is found
# 4. remove outliers from result
import math
def hypotenuse(x, y):
return math.sqrt(abs(x)**2 + abs(y)**2)
def remove_outliers(points, threshold):
outliers = [t for t, r in points.items() if r > threshold]
for outlier in outliers:
del points[outlier]
def solution(S, X, Y):
points = dict()
for tag, x, y in zip(S, X, Y):
h = hypotenuse(x, y)
# stop if tag exists
if tag in points:
break
# assign hypotenuse to tag
points[tag] = h
# remove anything further than h
remove_outliers(points, h)
# number of unique points in circle
result = len(points)
return result if result > 1 else 0 | en | 0.847273 | # Problem # There are N given points (o to N-1) on a plane. The K-th point is located at coordinates (X[K], Y[K]) and its tag is S[K]. We want to draw a circle centered on coordinates (0, 0). The circle should not contain two points with the same tag. What is the maximum number of points that can lie inside the circle? # Write a function that, given a string S of length N and two arrays X, Y consisting of N integers each, returns the maxium number of points inside the circle. The circle may contain only points with distinct tags, and centered on coordinates (0, 0). Points that are on the border of the circle are included within it. # Examples # 1: Given S = 'ABDCA', X = [2, -1, -4, -3, 3], Y = [2, -2, 4, 1, -3], the function should return 3. There are three points that can be included in the circle: ('A', 2, 2), ('B', -1, -2), ('C', -3, 1). The next point ('A', 3, -3) has the same tag as ('A', 2, 2), so it cannot be included. # 2: Given S = 'ABB', X = [1, -2, -2], Y = [1, -2, 2], the function should return 1. There are two points that cannot be included in the circle: ('B', -2, -2), ('B', -2, 2). They both have the tag 'B' and the same distance from coordinates (0, 0). # 3: Given S = 'CCD', X = [1, -1, 2], Y = [1, -1, 2], the function should return 0. The points with tag 'C' have the same distance from coordinates (0, 0). (both C's exclude themselves leaving the circle without points) # Solution # 1. compute distances # 2. find max hypotenuse `sqrt(a^2 + b^2)` # 3. break collection when duplicate tag is found # 4. remove outliers from result # stop if tag exists # assign hypotenuse to tag # remove anything further than h # number of unique points in circle | 4.093412 | 4 |
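A hypothetical driver for solution() above, exercising the first and third worked examples from the problem statement (not part of the original file).
print(solution('ABDCA', [2, -1, -4, -3, 3], [2, -2, 4, 1, -3]))  # expected 3
print(solution('CCD', [1, -1, 2], [1, -1, 2]))                   # expected 0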
interpreter/apps.py | fossabot/open-decision | 0 | 6630145 | <filename>interpreter/apps.py<gh_stars>0
from django.apps import AppConfig
class InterpreterConfig(AppConfig):
name = 'interpreter'
| <filename>interpreter/apps.py<gh_stars>0
from django.apps import AppConfig
class InterpreterConfig(AppConfig):
name = 'interpreter'
| none | 1 | 1.317445 | 1 |
|
Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/BreakingTheRecords.py | akkik04/Python-DataStructures-and-Algorithms | 1 | 6630146 | <reponame>akkik04/Python-DataStructures-and-Algorithms<filename>Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/BreakingTheRecords.py<gh_stars>1-10
# BREAKING THE RECORDS HACKERRANK SOLUTION:
# creating a function to calculate the number of times the max and min records were broken.
def breakingRecords(scores):
# creating variables and initially declaring the minimum and maximum values to the first value of the array.
minimum = scores[0]
maximum = scores[0]
# creating variables to store the count for the max and min values being broken.
max_broken_count = 0
min_broken_count = 0
# creating a for-loop to iterate for the length of the array.
for i in range(len(scores)):
# creating an if-statement to determine when to increment the count for the max and min values being broken.
if scores[i] > maximum:
# re-writing the new maximum if a higher value is found.
maximum = scores[i]
# incrementing the count for number of times the max score was broken.
max_broken_count += 1
elif scores[i] < minimum:
# rewriting the new minimum if a lower value is found.
minimum = scores[i]
# incrementing the count for the number of times the min score was broken.
min_broken_count += 1
# code to return the max and min counts.
return [max_broken_count, min_broken_count]
# receiving input.
n = int(input().strip())
scores = list(map(int, input().rstrip().split()))
# code to print the final output, which indicates the number of times the max and min score was broken.
result = breakingRecords(scores)
print(result) | Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/BreakingTheRecords.py<gh_stars>1-10
# BREAKING THE RECORDS HACKERRANK SOLUTION:
# creating a function to calculate amount of times the max and min records were broken.
def breakingRecords(scores):
# creating variables and initially declaring the minimum and maximum values to the first value of the array.
minimum = scores[0]
maximum = scores[0]
# creating variables to store the count for the max and min values being broken.
max_broken_count = 0
min_broken_count = 0
# creating a for-loop to iterate for the length of the array.
for i in range(len(scores)):
# creating an if-statement to determine when to increment the count for the max and min values being broken.
if scores[i] > maximum:
# re-writing the new maximum if a higher value is found.
maximum = scores[i]
# incrementing the count for number of times the max score was broken.
max_broken_count += 1
elif scores[i] < minimum:
# rewriting the new minimum if a lower value is found.
minimum = scores[i]
# incrementing the count for the number of times the min score was broken.
min_broken_count += 1
# code to return the max and min counts.
return [max_broken_count, min_broken_count]
# receiving input.
n = int(input().strip())
scores = list(map(int, input().rstrip().split()))
# code to print the final output, which indicates the number of times the max and min score was broken.
result = breakingRecords(scores)
print(result) | en | 0.869266 | # BREAKING THE RECORDS HACKERRANK SOLUTION: # creating a function to calculate amount of times the max and min records were broken. # creating variables and initially declaring the minimum and maximum values to the first value of the array. # creating variables to store the count for the max and min values being broken. # creating a for-loop to iterate for the length of the array. # creating an if-statement to determine when to increment the count for the max and min values being broken. # re-writing the new maximum if a higher value is found. # incrementing the count for number of times the max score was broken. # rewriting the new minimum if a lower value is found. # incrementing the count for the number of times the min score was broken. # code to return the max and min counts. # receiving input. # code to print the final output, which indicates the number of times the max and min score was broken. | 3.843082 | 4 |
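A hypothetical check of breakingRecords() on the commonly used sample season, bypassing the stdin plumbing above (not part of the original submission).
# Best score is broken twice (20, then 25); worst score four times (5, 4, 2, 1).
print(breakingRecords([10, 5, 20, 20, 4, 5, 2, 25, 1]))  # [2, 4]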
epa/forms.py | deedee/epa-admin | 0 | 6630147 |
from django.forms import ModelForm, Form, ValidationError, CharField, EmailField, PasswordInput, \
IntegerField, HiddenInput
from django.core.exceptions import FieldError
from django.contrib.auth.models import User
from epa.models import UploadData
from EPA_Admin.settings import EPA_ALLOWABLE_FILE_TYPES, EPA_IMAGE_VALID_HEADER, FILE_IMAGE
from epa import helper
import datetime
import imghdr
class UploadDataForm(ModelForm):
class Meta:
model = UploadData
fields = '__all__'
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(UploadDataForm, self).__init__(*args, **kwargs)
def clean_file(self):
uploaded_file = self.cleaned_data['file']
if uploaded_file:
if not helper.file_checker(uploaded_file.name, EPA_ALLOWABLE_FILE_TYPES):
raise ValidationError("only accept image and txt file")
_file = uploaded_file.name.split('/')
self.data['file_name'] = _file[len(_file) - 1]
self.data['date'] = datetime.datetime.now()
if helper.file_checker(self.data['file_name'], [FILE_IMAGE]):
self.data['type'] = 0
else:
self.data['type'] = 1
if self.request:
self.data['user'] = self.request.user.id
return uploaded_file
def save(self):
model = super(UploadDataForm, self).save()
if model.type == 0:
if imghdr.what(model.file.path) not in EPA_IMAGE_VALID_HEADER:
model.delete()
raise FieldError('Image file is not valid')
super(UploadDataForm, self).save()
class LoginForm(Form):
username = CharField()
password = CharField(widget=PasswordInput())
next = CharField(widget=HiddenInput, required=False, initial='/')
class RegisterForm(Form):
id = IntegerField(required=False, widget=HiddenInput())
username = CharField(min_length=3)
old_password = CharField(min_length=3, widget=PasswordInput(), required=False)
password = CharField(min_length=3, widget=PasswordInput())
password2 = CharField(min_length=3, widget=PasswordInput())
email = EmailField()
new_user = True
def clean_email(self):
"""
Validate email field
"""
_email = self.cleaned_data['email']
user = User.objects.filter(email=_email)
if user.count() == 1:
if self.new_user or (not self.new_user and user[0].id != int(self.cleaned_data['id'])):
raise ValidationError('Email is already used')
elif user.count() > 1:
raise ValidationError('Email is already used')
return _email
def clean(self):
"""
Other validation rules before we save it
"""
password = self.cleaned_data.get('password')
        password2 = self.cleaned_data.get('password2')
if password and password != password2:
raise ValidationError("Passwords don't match")
return self.cleaned_data
|
from django.forms import ModelForm, Form, ValidationError, CharField, EmailField, PasswordInput, \
IntegerField, HiddenInput
from django.core.exceptions import FieldError
from django.contrib.auth.models import User
from epa.models import UploadData
from EPA_Admin.settings import EPA_ALLOWABLE_FILE_TYPES, EPA_IMAGE_VALID_HEADER, FILE_IMAGE
from epa import helper
import datetime
import imghdr
class UploadDataForm(ModelForm):
class Meta:
model = UploadData
fields = '__all__'
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(UploadDataForm, self).__init__(*args, **kwargs)
def clean_file(self):
uploaded_file = self.cleaned_data['file']
if uploaded_file:
if not helper.file_checker(uploaded_file.name, EPA_ALLOWABLE_FILE_TYPES):
raise ValidationError("only accept image and txt file")
_file = uploaded_file.name.split('/')
self.data['file_name'] = _file[len(_file) - 1]
self.data['date'] = datetime.datetime.now()
if helper.file_checker(self.data['file_name'], [FILE_IMAGE]):
self.data['type'] = 0
else:
self.data['type'] = 1
if self.request:
self.data['user'] = self.request.user.id
return uploaded_file
def save(self):
model = super(UploadDataForm, self).save()
if model.type == 0:
if imghdr.what(model.file.path) not in EPA_IMAGE_VALID_HEADER:
model.delete()
raise FieldError('Image file is not valid')
super(UploadDataForm, self).save()
class LoginForm(Form):
username = CharField()
password = CharField(widget=PasswordInput())
next = CharField(widget=HiddenInput, required=False, initial='/')
class RegisterForm(Form):
id = IntegerField(required=False, widget=HiddenInput())
username = CharField(min_length=3)
old_password = CharField(min_length=3, widget=PasswordInput(), required=False)
password = CharField(min_length=3, widget=PasswordInput())
password2 = CharField(min_length=3, widget=PasswordInput())
email = EmailField()
new_user = True
def clean_email(self):
"""
Validate email field
"""
_email = self.cleaned_data['email']
user = User.objects.filter(email=_email)
if user.count() == 1:
if self.new_user or (not self.new_user and user[0].id != int(self.cleaned_data['id'])):
raise ValidationError('Email is already used')
elif user.count() > 1:
raise ValidationError('Email is already used')
return _email
def clean(self):
"""
Other validation rules before we save it
"""
password = self.cleaned_data.get('password')
        password2 = self.cleaned_data.get('password2')
if password and password != password2:
raise ValidationError("Passwords don't match")
return self.cleaned_data
| en | 0.747174 | Validate email field Other validation rules before we save it | 2.276741 | 2 |
Assignments/Dictionaries/Exercise/10. SoftUni Exam Results.py | KaloyankerR/python-fundamentals-repository | 0 | 6630148 | <filename>Assignments/Dictionaries/Exercise/10. SoftUni Exam Results.py<gh_stars>0
results = {}
submissions = {}
tokens = input()
while tokens != "exam finished":
tokens = tokens.split("-")
user = tokens[0]
language = tokens[1]
if language == "banned":
results.pop(user)
else:
points = int(tokens[2])
if user in results.keys():
if points >= results[user]:
results[user] = points
else:
results[user] = points
if language not in submissions.keys():
submissions[language] = 1
else:
submissions[language] += 1
tokens = input()
sorted_results = dict(sorted(results.items(), key=lambda x: (-x[1], x[0])))
print("Results:")
for (k, v) in sorted_results.items():
print(f"{k} | {v}")
sorted_submissions = dict(sorted(submissions.items(), key=lambda x: (-x[1], x[0])))
print("Submissions:")
for (k, v) in sorted_submissions.items():
print(f"{k} - {v}")
| <filename>Assignments/Dictionaries/Exercise/10. SoftUni Exam Results.py<gh_stars>0
results = {}
submissions = {}
tokens = input()
while tokens != "exam finished":
tokens = tokens.split("-")
user = tokens[0]
language = tokens[1]
if language == "banned":
results.pop(user)
else:
points = int(tokens[2])
if user in results.keys():
if points >= results[user]:
results[user] = points
else:
results[user] = points
if language not in submissions.keys():
submissions[language] = 1
else:
submissions[language] += 1
tokens = input()
sorted_results = dict(sorted(results.items(), key=lambda x: (-x[1], x[0])))
print("Results:")
for (k, v) in sorted_results.items():
print(f"{k} | {v}")
sorted_submissions = dict(sorted(submissions.items(), key=lambda x: (-x[1], x[0])))
print("Submissions:")
for (k, v) in sorted_submissions.items():
print(f"{k} - {v}")
| none | 1 | 3.62351 | 4 |
|
app/tag/models.py | AndyKrivovjas/notes | 0 | 6630149 | from datetime import datetime
from django.db import models
from django.utils.timezone import now
from app.users.models import User
# Create your models here.
class Tag(models.Model):
name = models.CharField('name', max_length=255, default='')
owner = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
date_added = models.DateField('date_added', max_length=255, default=now)
date_modified = models.DateField('date_modified', max_length=255, default=now)
def __unicode__(self):
return self.name
| from datetime import datetime
from django.db import models
from django.utils.timezone import now
from app.users.models import User
# Create your models here.
class Tag(models.Model):
name = models.CharField('name', max_length=255, default='')
owner = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
date_added = models.DateField('date_added', max_length=255, default=now)
date_modified = models.DateField('date_modified', max_length=255, default=now)
def __unicode__(self):
return self.name
| en | 0.963489 | # Create your models here. | 2.325248 | 2 |
Ballistics.py | Caleb68864/Scope-Cap-Chart-Generator | 0 | 6630150 | import pandas as pd
from pathlib import Path
import os
from xlrd import open_workbook, XLRDError
class Ballistics:
def __init__(self, csv='./ballistics.csv', min_range=-1, max_range=-1, step=-1, range_col='Range', cols=[]):
csv_file = Path(csv)
if csv_file.is_file():
#print("File Found")
filename = os.path.split(csv)
ext = filename[1].split('.')
ext = ext[len(ext) - 1]
#print(filename[1], ext)
if ext == 'csv':
self.orig_ballistics = self.ballistics = pd.read_csv(csv)
elif ext == 'xls' or ext == 'xlsx':
try:
open_workbook(csv)
except XLRDError:
self.orig_ballistics = self.ballistics = pd.DataFrame()
print("Not A Invalid Excel File!")
else:
self.orig_ballistics = self.ballistics = pd.read_excel(csv)
else:
self.orig_ballistics = self.ballistics = pd.DataFrame()
print("Invalid File: Load CSV or Excel")
else:
self.orig_ballistics = self.ballistics = pd.DataFrame()
self.range_col = range_col
        self.setrange(min_range, max_range, step)
self.selectcolumns(cols)
def setorigballistics(self, b):
self.orig_ballistics = b
def reset(self):
self.ballistics = self.orig_ballistics
def setrange(self, min_range=-1, max_range=-1, step=-1):
min_ranges = pd.DataFrame()
max_ranges = pd.DataFrame()
step_ranges = pd.DataFrame()
if max_range > 0:
max_ranges = self.ballistics[self.range_col] <= max_range
if min_range > 0:
min_ranges = self.ballistics[self.range_col] >= min_range
if step > 0:
step_ranges = self.ballistics[self.range_col] % step == 0
if not min_ranges.empty and not max_ranges.empty:
self.ballistics = self.ballistics[min_ranges & max_ranges]
elif not min_ranges.empty:
self.ballistics = self.ballistics[min_ranges]
elif not max_ranges.empty:
self.ballistics = self.ballistics[max_ranges]
if not step_ranges.empty:
self.ballistics = self.ballistics[step_ranges]
def selectcolumns(self, cols):
if len(cols) > 0:
self.ballistics = self.ballistics.iloc[:, cols]
#print(self.ballistics.iloc[:, cols])
def setrangecol(self, range_col):
if range_col:
self.range_col = range_col
def genballisticscsv(self):
csv_file = Path("./ballistics.csv")
if not csv_file.is_file():
file = open(csv_file, 'w')
file.write('Range,Velocity,Energy,Trajectory,MOA,MILS')
file.close()
else:
print("Ballistics File Exists")
def getballistics(self):
df1 = pd.read_csv('./ballistics.csv')
# df2=df1.set_index("Range")
start_range = 100
end_range = 500
range_col = 'Range'
mm_col = 'Come Up (MILS)'
# for index, row in df1.iterrows():
# if row[range_col] >= start_range and row[range_col] <= end_range:
# print(row[range_col], row[mm_col])
return df1
| import pandas as pd
from pathlib import Path
import os
from xlrd import open_workbook, XLRDError
class Ballistics:
def __init__(self, csv='./ballistics.csv', min_range=-1, max_range=-1, step=-1, range_col='Range', cols=[]):
csv_file = Path(csv)
if csv_file.is_file():
#print("File Found")
filename = os.path.split(csv)
ext = filename[1].split('.')
ext = ext[len(ext) - 1]
#print(filename[1], ext)
if ext == 'csv':
self.orig_ballistics = self.ballistics = pd.read_csv(csv)
elif ext == 'xls' or ext == 'xlsx':
try:
open_workbook(csv)
except XLRDError:
self.orig_ballistics = self.ballistics = pd.DataFrame()
print("Not A Invalid Excel File!")
else:
self.orig_ballistics = self.ballistics = pd.read_excel(csv)
else:
self.orig_ballistics = self.ballistics = pd.DataFrame()
print("Invalid File: Load CSV or Excel")
else:
self.orig_ballistics = self.ballistics = pd.DataFrame()
self.range_col = range_col
        self.setrange(min_range, max_range, step)
self.selectcolumns(cols)
def setorigballistics(self, b):
self.orig_ballistics = b
def reset(self):
self.ballistics = self.orig_ballistics
def setrange(self, min_range=-1, max_range=-1, step=-1):
min_ranges = pd.DataFrame()
max_ranges = pd.DataFrame()
step_ranges = pd.DataFrame()
if max_range > 0:
max_ranges = self.ballistics[self.range_col] <= max_range
if min_range > 0:
min_ranges = self.ballistics[self.range_col] >= min_range
if step > 0:
step_ranges = self.ballistics[self.range_col] % step == 0
if not min_ranges.empty and not max_ranges.empty:
self.ballistics = self.ballistics[min_ranges & max_ranges]
elif not min_ranges.empty:
self.ballistics = self.ballistics[min_ranges]
elif not max_ranges.empty:
self.ballistics = self.ballistics[max_ranges]
if not step_ranges.empty:
self.ballistics = self.ballistics[step_ranges]
def selectcolumns(self, cols):
if len(cols) > 0:
self.ballistics = self.ballistics.iloc[:, cols]
#print(self.ballistics.iloc[:, cols])
def setrangecol(self, range_col):
if range_col:
self.range_col = range_col
def genballisticscsv(self):
csv_file = Path("./ballistics.csv")
if not csv_file.is_file():
file = open(csv_file, 'w')
file.write('Range,Velocity,Energy,Trajectory,MOA,MILS')
file.close()
else:
print("Ballistics File Exists")
def getballistics(self):
df1 = pd.read_csv('./ballistics.csv')
# df2=df1.set_index("Range")
start_range = 100
end_range = 500
range_col = 'Range'
mm_col = 'Come Up (MILS)'
# for index, row in df1.iterrows():
# if row[range_col] >= start_range and row[range_col] <= end_range:
# print(row[range_col], row[mm_col])
return df1
| en | 0.26886 | #print("File Found") #print(filename[1], ext) #print(self.ballistics.iloc[:, cols]) # df2=df1.set_index("Range") # for index, row in df1.iterrows(): # if row[range_col] >= start_range and row[range_col] <= end_range: # print(row[range_col], row[mm_col]) | 3.17838 | 3 |
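A short usage sketch for the Ballistics helper above; it assumes ./ballistics.csv exists with the header written by genballisticscsv() (Range,Velocity,Energy,Trajectory,MOA,MILS) plus data rows.
b = Ballistics('./ballistics.csv', cols=[0, 3, 5])   # keep the Range, Trajectory and MILS columns
b.setrange(min_range=100, max_range=500, step=100)   # rows from 100 to 500 in steps of 100
print(b.ballistics)                                  # filtered DataFrame, ready for chart layout
b.reset()                                            # restore the full table loaded from the CSV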