content (string) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) | licenses (sequence) | repository_name (string) | path (string) | size (int64) | lang (string) |
---|---|---|---|---|---|---|---|---|
import titration.utils.analysis as analysis
import titration.utils.constants as constants
import titration.utils.devices.serial_mock as serial_mock
import titration.utils.interfaces as interfaces
class Syringe_Pump:
def __init__(self):
self.serial = serial_mock.Serial(
port=constants.ARDUINO_PORT,
baudrate=constants.ARDUINO_BAUD,
timeout=constants.ARDUINO_TIMEOUT,
)
self.volume_in_pump = constants.volume_in_pump
self.max_pump_capacity = constants.MAX_PUMP_CAPACITY
def set_volume_in_pump(self, volume):
self.volume_in_pump = volume
constants.volume_in_pump = volume
def get_volume_in_pump(self):
return self.volume_in_pump
def pump_volume(self, volume, direction):
volume_to_add = volume
# pull in solution
if direction == 0:
# check if volume to add is greater than space left
space_in_pump = self.max_pump_capacity - self.volume_in_pump
if volume_to_add > space_in_pump:
volume_to_add = self.max_pump_capacity - self.volume_in_pump
self.drive_pump(volume_to_add, direction)
# pump out solution
elif direction == 1:
# volume greater than max capacity of pump
if volume_to_add > self.max_pump_capacity:
interfaces.lcd_out(
"Volume > pumpable", style=constants.LCD_CENT_JUST, line=4
)
# pump out all current volume
next_volume = self.volume_in_pump
self.drive_pump(next_volume, 1)
# calculate new volume to add
volume_to_add = volume_to_add - next_volume
# keep pumping until full volume_to_add is met
while volume_to_add > 0:
next_volume = min(volume_to_add, self.max_pump_capacity)
self.drive_pump(next_volume, 0)
self.drive_pump(next_volume, 1)
volume_to_add -= next_volume
# volume greater than volume in pump
elif volume_to_add > self.volume_in_pump:
next_volume = self.volume_in_pump
self.drive_pump(next_volume, 1)
# calculate remaining volume to add
volume_to_add -= next_volume
self.drive_pump(volume_to_add, 0)
self.drive_pump(volume_to_add, 1)
else:
# volume less than volume in pump
self.drive_pump(volume_to_add, direction)
def drive_pump(self, volume, direction):
"""Converts volume to cycles and ensures and checks pump level and values"""
if direction == 0:
space_in_pump = self.max_pump_capacity - self.volume_in_pump
if volume > space_in_pump:
interfaces.lcd_out("Filling Error", line=4)
else:
interfaces.lcd_out("Filling {0:1.2f} ml".format(volume), line=4)
cycles = analysis.determine_pump_cycles(volume)
self.drive_step_stick(cycles, direction)
self.volume_in_pump += volume
elif direction == 1:
if volume > self.volume_in_pump:
interfaces.lcd_out("Pumping Error", line=4)
else:
interfaces.lcd_out("Pumping {0:1.2f} ml".format(volume), line=4)
cycles = analysis.determine_pump_cycles(volume)
offset = self.drive_step_stick(cycles, direction)
                # offset is the value returned by drive_step_stick, which in turn is reported by the Arduino
if offset != 0:
self.drive_step_stick(offset, 0)
self.drive_step_stick(offset, 1)
self.volume_in_pump -= volume
interfaces.lcd_out("Pump Vol: {0:1.2f} ml".format(self.volume_in_pump), line=4)
def drive_step_stick(self, cycles, direction):
"""
cycles and direction are integers
Communicates with arduino to add HCl through pump
:param cycles: number of rising edges for the pump
:param direction: direction of pump
"""
if cycles == 0:
return 0
if self.serial.writable():
self.serial.write(cycles.to_bytes(4, "little"))
self.serial.write(direction.to_bytes(1, "little"))
self.serial.flush()
temp = self.serial.readline()
if temp == b"DONE\r\n" or temp == b"":
return 0
else:
return int(temp)
else:
interfaces.lcd_out("Arduino Unavailable", 4, constants.LCD_CENT_JUST)
| 39.689076 | 112 | 0.597925 | [
"MIT"
] | KonradMcClure/AlkalinityTitrator | titration/utils/devices/syringe_pump_mock.py | 4,723 | Python |
from typing import TYPE_CHECKING
from loopchain.jsonrpc.exception import GenericJsonRpcServerError
from loopchain.blockchain.blocks import BlockBuilder, BlockVerifier as BaseBlockVerifier
from loopchain.blockchain.blocks.v0_1a import BlockHeader
from loopchain.blockchain.exception import ScoreInvokeError, ScoreInvokeResultError
if TYPE_CHECKING:
from loopchain.blockchain.types import ExternalAddress
from loopchain.blockchain.blocks import Block
from loopchain.blockchain.blocks.v0_1a import BlockBody
class BlockVerifier(BaseBlockVerifier):
version = BlockHeader.version
def _verify_common(self, block: 'Block', prev_block: 'Block', **kwargs):
generator: 'ExternalAddress' = kwargs.get("generator")
header: BlockHeader = block.header
body: BlockBody = block.body
builder = BlockBuilder.new(self.version, self._tx_versioner)
builder.height = header.height
builder.prev_hash = header.prev_hash
builder.fixed_timestamp = header.timestamp
for tx in body.transactions.values():
builder.transactions[tx.hash] = tx
invoke_result = None
if self.invoke_func:
self.verify_invoke(builder, block, prev_block)
builder.build_merkle_tree_root_hash()
if header.merkle_tree_root_hash != builder.merkle_tree_root_hash:
exception = RuntimeError(f"Block({header.height}, {header.hash.hex()}, "
f"MerkleTreeRootHash({header.merkle_tree_root_hash.hex()}), "
f"Expected({builder.merkle_tree_root_hash.hex()}).")
self._handle_exception(exception)
builder.build_hash()
if header.hash != builder.hash:
exception = RuntimeError(f"Block({header.height}, {header.hash.hex()}, "
f"Hash({header.hash.hex()}, "
f"Expected({builder.hash.hex()}).")
self._handle_exception(exception)
if generator:
self.verify_generator(block, generator)
return invoke_result
def verify_invoke(self, builder: 'BlockBuilder', block: 'Block', prev_block: 'Block'):
header: BlockHeader = block.header
try:
new_block, invoke_result = self.invoke_func(block, prev_block)
except GenericJsonRpcServerError as e:
if hasattr(e, 'message') and 'Failed to invoke a block' in e.message:
e = ScoreInvokeError(f"{e.message} with block({header.hash.hex()})")
self._handle_exception(e)
except Exception as e:
self._handle_exception(e)
else:
if not header.commit_state and len(block.body.transactions) == 0:
# vote block
pass
elif header.commit_state != new_block.header.commit_state:
exception = ScoreInvokeResultError(f"Block({header.height}, {header.hash.hex()}, "
f"CommitState({header.commit_state}), "
f"Expected({new_block.header.commit_state}).")
self._handle_exception(exception)
def verify_prev_block(self, block: 'Block', prev_block: 'Block'):
super().verify_prev_block(block, prev_block)
prev_block_header: BlockHeader = prev_block.header
block_header: BlockHeader = block.header
if not block_header.complained and prev_block_header.next_leader and \
prev_block_header.next_leader != block_header.peer_id:
exception = RuntimeError(f"Block({block.header.height}, {block.header.hash.hex()}, "
f"Leader({block_header.peer_id.hex_xx()}), "
f"Expected({prev_block_header.next_leader.hex_xx()}).")
self._handle_exception(exception)
def verify_generator(self, block: 'Block', generator: 'ExternalAddress'):
block_header: BlockHeader = block.header
if not block_header.complained and block.header.peer_id != generator:
exception = RuntimeError(f"Block({block.header.height}, {block.header.hash.hex()}, "
f"Generator({block.header.peer_id.hex_xx()}), "
f"Expected({generator.hex_xx()}).")
self._handle_exception(exception)
def _handle_exception(self, exception: Exception):
if self._raise_exceptions:
raise exception
else:
self.exceptions.append(exception)
| 46.494949 | 98 | 0.625244 | [
"Apache-2.0"
] | JINWOO-J/loopchain | loopchain/blockchain/blocks/v0_1a/block_verifier.py | 4,603 | Python |
# Copyright (c) 2019-2020, Manfred Moitzi
# License: MIT-License
from typing import TYPE_CHECKING, Iterable, cast, Union, List, Set
from contextlib import contextmanager
import logging
from ezdxf.lldxf import validator, const
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, RETURN_DEFAULT, group_code_mapping,
)
from ezdxf.audit import AuditError
from .dxfentity import base_class, SubclassProcessor, DXFEntity
from .dxfobj import DXFObject
from .factory import register_entity
from .objectcollection import ObjectCollection
logger = logging.getLogger('ezdxf')
if TYPE_CHECKING:
from ezdxf.eztypes import (
TagWriter, Drawing, DXFNamespace, Auditor, EntityDB,
)
__all__ = ['DXFGroup', 'GroupCollection']
acdb_group = DefSubclass('AcDbGroup', {
# Group description
'description': DXFAttr(300, default=''),
# 1 = Unnamed
# 0 = Named
'unnamed': DXFAttr(
70, default=1, validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# 1 = Selectable
# 0 = Not selectable
'selectable': DXFAttr(
71, default=1,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# 340: Hard-pointer handle to entity in group (one entry per object)
})
acdb_group_group_codes = group_code_mapping(acdb_group)
GROUP_ITEM_CODE = 340
@register_entity
class DXFGroup(DXFObject):
""" Groups are not allowed in block definitions, and each entity can only
    reside in one group, so cloning of groups also creates new entities.
"""
DXFTYPE = 'GROUP'
DXFATTRIBS = DXFAttributes(base_class, acdb_group)
def __init__(self):
super().__init__()
self._handles: Set[str] = set() # only needed at the loading stage
self._data: List[DXFEntity] = []
def copy(self):
raise const.DXFTypeError('Copying of GROUP not supported.')
def load_dxf_attribs(self,
processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.fast_load_dxfattribs(
dxf, acdb_group_group_codes, 1, log=False)
self.load_group(tags)
return dxf
def load_group(self, tags):
for code, value in tags:
if code == GROUP_ITEM_CODE:
# First store handles, because at this point, objects
# are not stored in the EntityDB:
self._handles.add(value)
def preprocess_export(self, tagwriter: 'TagWriter') -> bool:
self.purge(self.doc.entitydb)
return True # export even empty groups
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_group.name)
self.dxf.export_dxf_attribs(tagwriter, [
'description', 'unnamed', 'selectable'])
self.export_group(tagwriter)
def export_group(self, tagwriter: 'TagWriter'):
for entity in self._data:
tagwriter.write_tag2(GROUP_ITEM_CODE, entity.dxf.handle)
def __iter__(self) -> Iterable[DXFEntity]:
""" Iterate over all DXF entities in :class:`DXFGroup` as instances of
:class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
"""
return (e for e in self._data if e.is_alive)
def __len__(self) -> int:
""" Returns the count of DXF entities in :class:`DXFGroup`. """
return len(self._data)
def __getitem__(self, item):
""" Returns entities by standard Python indexing and slicing. """
return self._data[item]
def __contains__(self, item: Union[str, DXFEntity]) -> bool:
""" Returns ``True`` if item is in :class:`DXFGroup`. `item` has to be
a handle string or an object of type :class:`DXFEntity` or inherited.
"""
handle = item if isinstance(item, str) else item.dxf.handle
return handle in set(self.handles())
def handles(self) -> Iterable[str]:
""" Iterable of handles of all DXF entities in :class:`DXFGroup`. """
return (entity.dxf.handle for entity in self)
def post_load_hook(self, doc: 'Drawing') -> None:
super().post_load_hook(doc)
db_get = doc.entitydb.get
def entities():
for handle in self._handles:
entity = db_get(handle)
if entity and entity.is_alive:
yield entity
try:
self.set_data(entities())
except const.DXFStructureError as e:
logger.error(str(e))
del self._handles # all referenced entities are stored in _data
@contextmanager
def edit_data(self) -> List[DXFEntity]:
""" Context manager which yields all the group entities as
standard Python list::
with group.edit_data() as data:
# add new entities to a group
data.append(modelspace.add_line((0, 0), (3, 0)))
# remove last entity from a group
data.pop()
"""
data = list(self)
yield data
self.set_data(data)
def set_data(self, entities: Iterable[DXFEntity]) -> None:
""" Set `entities` as new group content, entities should be an iterable
:class:`DXFGraphic` or inherited (LINE, CIRCLE, ...).
Raises :class:`DXFValueError` if not all entities be on the same layout
(modelspace or any paperspace layout but not block)
"""
entities = list(entities)
if not all_entities_on_same_layout(entities):
raise const.DXFStructureError(
"All entities have to be in the same layout and are not allowed"
" to be in a block layout."
)
self.clear()
self._data = entities
def extend(self, entities: Iterable[DXFEntity]) -> None:
""" Add `entities` to :class:`DXFGroup`. """
self._data.extend(entities)
def clear(self) -> None:
""" Remove all entities from :class:`DXFGroup`, does not delete any
drawing entities referenced by this group.
"""
self._data = []
def audit(self, auditor: 'Auditor') -> None:
""" Remove invalid handles from :class:`DXFGroup`.
        Invalid handles belong to deleted entities, to entities not located in
        the same layout as the rest of the group, or to entities placed in a
        block layout.
"""
# Remove destroyed or invalid entities:
self.purge(auditor.entitydb)
if not all_entities_on_same_layout(self._data):
auditor.fixed_error(
code=AuditError.GROUP_ENTITIES_IN_DIFFERENT_LAYOUTS,
message=f'Cleared {str(self)}, not all entities are located in '
f'the same layout.',
)
self.clear()
def _has_valid_owner(self, entity, db: 'EntityDB') -> bool:
# no owner -> no layout association
if entity.dxf.owner is None:
return False
owner = db.get(entity.dxf.owner)
# owner does not exist or is destroyed -> no layout association
if owner is None or not owner.is_alive:
return False
# owner block_record.layout is 0 if entity is in a block definition,
# which is not allowed:
valid = owner.dxf.layout != '0'
if not valid:
logger.debug(
f"{str(entity)} in {str(self)} is located in a block layout, "
f"which is not allowed")
return valid
def _filter_invalid_entities(self, db: 'EntityDB') -> List[DXFEntity]:
assert db is not None
return [e for e in self._data
if e.is_alive and self._has_valid_owner(e, db)]
def purge(self, db: 'EntityDB') -> None:
""" Remove invalid group entities. """
self._data = self._filter_invalid_entities(db)
def all_entities_on_same_layout(entities: Iterable[DXFEntity]):
""" Check if all entities are on the same layout (model space or any paper
layout but not block).
"""
owners = set(entity.dxf.owner for entity in entities)
# 0 for no entities; 1 for all entities on the same layout
return len(owners) < 2
class GroupCollection(ObjectCollection):
def __init__(self, doc: 'Drawing'):
super().__init__(doc, dict_name='ACAD_GROUP', object_type='GROUP')
self._next_unnamed_number = 0
def groups(self) -> Iterable[DXFGroup]:
""" Iterable of all existing groups. """
for name, group in self:
yield group
def next_name(self) -> str:
name = self._next_name()
while name in self:
name = self._next_name()
return name
def _next_name(self) -> str:
self._next_unnamed_number += 1
return f"*A{self._next_unnamed_number}"
def new(self, name: str = None, description: str = "",
selectable: bool = True) -> DXFGroup:
r""" Creates a new group. If `name` is ``None`` an unnamed group is
created, which has an automatically generated name like "\*Annnn".
Args:
name: group name as string
description: group description as string
selectable: group is selectable if ``True``
"""
if name in self:
raise const.DXFValueError(f"GROUP '{name}' already exists.")
if name is None:
name = self.next_name()
unnamed = 1
else:
unnamed = 0
# The group name isn't stored in the group entity itself.
dxfattribs = {
'description': description,
'unnamed': unnamed,
'selectable': int(bool(selectable)),
}
return cast(DXFGroup, self._new(name, dxfattribs))
def delete(self, group: Union[DXFGroup, str]) -> None:
""" Delete `group`, `group` can be an object of type :class:`DXFGroup`
or a group name as string.
"""
# Delete group by name:
if isinstance(group, str):
name = group
elif group.dxftype() == 'GROUP':
name = get_group_name(group, self.entitydb)
else:
raise TypeError(group.dxftype())
if name in self:
super().delete(name)
else:
raise const.DXFValueError("GROUP not in group table registered.")
def audit(self, auditor: 'Auditor') -> None:
""" Removes empty groups and invalid handles from all groups. """
trash = []
for name, group in self:
group.audit(auditor)
if not len(group): # remove empty group
# do not delete groups while iterating over groups!
trash.append(name)
# now delete empty groups
for name in trash:
auditor.fixed_error(
code=AuditError.REMOVE_EMPTY_GROUP,
message=f'Removed empty group "{name}".',
)
self.delete(name)
def get_group_name(group: DXFGroup, db: 'EntityDB') -> str:
""" Get name of `group`. """
group_table = cast('Dictionary', db[group.dxf.owner])
for name, entity in group_table.items():
if entity is group:
return name
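# Illustrative usage sketch (not part of the original module), assuming the
# public ezdxf API where a Drawing exposes this collection as `doc.groups`:
#
#     import ezdxf
#     doc = ezdxf.new()
#     msp = doc.modelspace()
#     group = doc.groups.new(description="example group")
#     with group.edit_data() as data:
#         data.append(msp.add_line((0, 0), (3, 0)))
#     assert len(group) == 1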
| 34.432927 | 80 | 0.609616 | [
"MIT"
] | dmtvanzanten/ezdxf | src/ezdxf/entities/dxfgroups.py | 11,294 | Python |
import numpy as np
import pandas as pd
from pylab import rcParams
from sklearn.metrics import mean_absolute_error, mean_squared_error
# Additional custom functions
from cases.industrial.processing import multi_automl_fit_forecast, plot_results
from fedot.core.constants import BEST_QUALITY_PRESET_NAME
from fedot.core.data.multi_modal import prepare_multimodal_data
rcParams['figure.figsize'] = 15, 7
if __name__ == '__main__':
# Below is an example of multivariate time series forecasting.
# An example of how forecasts can be made is presented and a simple
# validation is given on a single block which length is equal to the
# length of the forecast horizon.
# Define forecast horizon and read dataframe
forecast_length = 20
df = pd.read_csv('pw_dataset.csv', parse_dates=['datetime'])
# Wrap time series data into InputData class
features_to_use = ['wind_power_kWh', 'diesel_time_h', 'wind_time_h',
'velocity_max_msec', 'velocity_mean_msec', 'tmp_grad',
'diesel_fuel_kWh']
ts = np.array(df['diesel_fuel_kWh'])
mm_train, mm_test, = prepare_multimodal_data(dataframe=df,
features=features_to_use,
forecast_length=forecast_length)
# Prepare parameters for algorithm launch
    # timeout - how many minutes the AutoML algorithm is allowed to run (here 0.5, i.e. 30 seconds)
timeout = 0.5
composer_params = {'max_depth': 6,
'max_arity': 3,
'pop_size': 20,
'num_of_generations': 20,
'preset': BEST_QUALITY_PRESET_NAME,
'metric': 'rmse',
'cv_folds': None,
'validation_blocks': None}
forecast, obtained_pipeline = multi_automl_fit_forecast(mm_train, mm_test,
timeout, composer_params,
ts, forecast_length,
vis=True)
mse_metric = mean_squared_error(ts[-forecast_length:], forecast, squared=False)
mae_metric = mean_absolute_error(ts[-forecast_length:], forecast)
print(f'MAE - {mae_metric:.2f}')
print(f'RMSE - {mse_metric:.2f}')
# Save obtained pipeline
obtained_pipeline.save('best')
# Visualise predictions
plot_results(actual_time_series=ts,
predicted_values=forecast,
len_train_data=len(ts) - forecast_length)
| 43.333333 | 85 | 0.608077 | [
"BSD-3-Clause"
] | vishalbelsare/FEDOT | cases/industrial/multivariate_forecasting.py | 2,600 | Python |
from werkzeug.wrappers import Response
from .application import Rocinante
from .request import Request
from .response import JSONResponse
from .router import Router
from .handler import RequestHandler
from .url import Url
from . import status
| 24.4 | 38 | 0.831967 | [
"Apache-2.0"
] | a30285/Rocinante | rocinante/__init__.py | 244 | Python |
"""
Source code for PyGMT modules.
"""
# pylint: disable=import-outside-toplevel
from pygmt.src.basemap import basemap
from pygmt.src.blockm import blockmean, blockmedian
from pygmt.src.coast import coast
from pygmt.src.colorbar import colorbar
from pygmt.src.config import config
from pygmt.src.contour import contour
from pygmt.src.grd2cpt import grd2cpt
from pygmt.src.grdcontour import grdcontour
from pygmt.src.grdcut import grdcut
from pygmt.src.grdfilter import grdfilter
from pygmt.src.grdimage import grdimage
from pygmt.src.grdinfo import grdinfo
from pygmt.src.grdtrack import grdtrack
from pygmt.src.grdview import grdview
from pygmt.src.image import image
from pygmt.src.info import info
from pygmt.src.inset import inset
from pygmt.src.legend import legend
from pygmt.src.logo import logo
from pygmt.src.makecpt import makecpt
from pygmt.src.meca import meca
from pygmt.src.plot import plot
from pygmt.src.plot3d import plot3d
from pygmt.src.rose import rose
from pygmt.src.solar import solar
from pygmt.src.subplot import set_panel, subplot
from pygmt.src.surface import surface
from pygmt.src.text import text_ as text # "text" is an argument within "text_"
from pygmt.src.which import which
from pygmt.src.x2sys_cross import x2sys_cross
from pygmt.src.x2sys_init import x2sys_init
| 35.135135 | 80 | 0.824615 | [
"BSD-3-Clause"
] | alperen-kilic/pygmt | pygmt/src/__init__.py | 1,300 | Python |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from parser_base import RegexParser
import model
class RegexSemantics(object):
def __init__(self):
super(RegexSemantics, self).__init__()
self._count = 0
def START(self, ast):
return model.Regex(ast)
def CHOICE(self, ast):
return model.Choice(ast.opts)
def SEQUENCE(self, ast):
if not ast.terms:
return model.Empty()
elif len(ast.terms) < 2:
return ast.terms[0]
else:
return model.Sequence(ast.terms)
def CLOSURE(self, ast):
return model.Closure(ast)
def SUBEXP(self, ast):
return ast
def LITERAL(self, ast):
return model.Literal(ast)
def translate(regex, trace=False):
parser = RegexParser(trace=trace, semantics=RegexSemantics())
model = parser.parse(regex, 'START')
model.set_rule_numbers()
return model.render()
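# Illustrative usage sketch (not part of the original example); it assumes the
# accompanying grako regex grammar accepts a simple pattern built from
# literals, '|' choices and '*' closures:
if __name__ == '__main__':
    print(translate('(a|b)*c', trace=False))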
| 24.219512 | 82 | 0.637462 | [
"BSD-2-Clause"
] | alyosha1879/grako | examples/regex/regex_parser.py | 993 | Python |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class VolumeSnapshotGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[VolumeSnapshot]',
'total': 'list[VolumeSnapshot]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.VolumeSnapshot]
total=None, # type: List[models.VolumeSnapshot]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[VolumeSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
total (list[VolumeSnapshot]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumeSnapshotGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VolumeSnapshotGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VolumeSnapshotGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
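# Illustrative usage sketch (not part of the generated module; the relative
# imports above mean this class is normally used via the pypureclient package):
#
#     resp = VolumeSnapshotGetResponse(total_item_count=0, items=[], total=[])
#     resp.to_dict()   # -> {'total_item_count': 0, 'items': [], 'total': []}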
| 39.647059 | 524 | 0.618323 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flasharray/FA_2_7/models/volume_snapshot_get_response.py | 5,392 | Python |
"""A fingerprint + random forest model.
Tries to generate independent and identically distributed fingerprints as decoys.
"""
import os
import sys
import json
import argparse
import numpy as np
from pathlib import Path
from tqdm import tqdm
import scipy.sparse as sp
from scipy.spatial import distance
from multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit.Chem import Descriptors
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-i', '--index', required=True)
parser.add_argument(
'-d', '--datadir', required=True, help="pdbbind datadir, like v2018")
parser.add_argument(
'-u', '--uclust', help="uclust output, format: https://www.drive5.com/usearch/manual/opt_uc.html")
args = parser.parse_args()
DATADIR = Path(args.datadir)
def read_index(index_file):
codes = []
pKs = []
with open(index_file) as f:
for i in f:
if i[0] == '#': continue
code, reso, year, pK, *others = i.split()
codes.append(code)
pKs.append(float(pK))
return codes, pKs
def getProp(mol):
mw = Descriptors.ExactMolWt(mol)
logp = Descriptors.MolLogP(mol)
rotb = Descriptors.NumRotatableBonds(mol)
hbd = Descriptors.NumHDonors(mol)
hba = Descriptors.NumHAcceptors(mol)
q = Chem.GetFormalCharge(mol)
return tuple([mw, logp, rotb, hbd, hba, q])
def load_fps(codes):
print("Loading ligand fingerprint")
fps = []
for i, code in tqdm(enumerate(codes), total=len(codes)):
# already converted ligand.mol2 to ligand.pdb by babel
path = DATADIR / code / (code + '_ligand.pdb')
if not path.exists():
fps.append(None)
continue
mol = Chem.MolFromPDBFile(str(path))
if mol is None:
fps.append(None)
continue
# fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=512)
fp = getProp(mol)
fps.append(fp)
notNone = sum([1 for i in fps if i is not None])
print('succeed loaded {}/{}'.format(notNone, len(codes)))
return fps
def load_clust(uclust_file, codes):
clust_nums = [None for i in codes]
all_clust_nums = []
labels = []
with open(uclust_file) as f:
for line in f:
fields = line.split()
all_clust_nums.append( int(fields[1]))
labels.append(fields[8])
for i, code in enumerate(codes):
try:
idx = labels.index(code)
clust_nums[i] = all_clust_nums[idx]
except ValueError:
continue
return clust_nums
codes, pKs = read_index(args.index)
fps = load_fps(codes)
Nones = [i for i in range(len(codes)) if fps[i] is None]
fps = [j for i,j in enumerate(fps) if i not in Nones]
pKs = [j for i,j in enumerate(pKs) if i not in Nones]
codes = [j for i,j in enumerate(codes) if i not in Nones]
X = np.array(fps)
if args.uclust:
clust_nums = load_clust(args.uclust, codes)
Nones.extend([i for i in range(len(codes)) if clust_nums[i] is None])
Nones = set(Nones)
fps = [j for i,j in enumerate(fps) if i not in Nones]
pKs = [j for i,j in enumerate(pKs) if i not in Nones]
codes = [j for i,j in enumerate(codes) if i not in Nones]
clust_nums = [j for i,j in enumerate(clust_nums) if i not in Nones]
clust_nums = np.array(clust_nums, dtype=int)
join_clust = np.zeros_like(clust_nums)
for i, num in enumerate(set(clust_nums)):
mask = clust_nums == num
        # clusters with fewer than 10 members are merged into cluster 0
if sum(mask) >= 10:
join_clust[mask] = i+1
nb_clust = max(join_clust) + 1
print(join_clust)
one_hot = np.eye(nb_clust, dtype=int)[join_clust]
X = np.hstack((one_hot, fps))
X = one_hot
print(X.shape)
pKs = np.array(pKs)
# filter None
for seed in (111, 222, 333):
np.random.seed(seed)
N = len(codes)
perm = np.random.permutation(N)
train_idx = perm[:int(N*0.8)]
valid_idx = perm[int(N*0.8):int(N*0.9)]
test_idx = perm[int(N*0.9):]
train_X = X[train_idx]
test_X = X[test_idx]
train_pKs = pKs[train_idx]
test_pKs = pKs[test_idx]
clf = RandomForestRegressor(
n_estimators=10,
max_depth=15,
# min_samples_split=10,
min_samples_split=5,
min_samples_leaf=1,
random_state=0,
n_jobs=8,
)
clf.fit(train_X, train_pKs)
pred_pKs = clf.predict(test_X)
r2 = np.corrcoef(test_pKs, pred_pKs)[0,1] ** 2
print('seed {} r2: {}'.format(seed, r2))
| 30.754839 | 106 | 0.646528 | [
"MIT"
] | hnlab/can-ai-do | pdbbind/props_random_forest.py | 4,767 | Python |
from datanator_query_python.util import mongo_util
from pymongo.collation import Collation, CollationStrength
class QueryXmdb:
def __init__(self, username=None, password=None, server=None, authSource='admin',
database='datanator', max_entries=float('inf'), verbose=True, collection_str='ecmdb',
readPreference='nearest', replicaSet=None):
self.mongo_manager = mongo_util.MongoUtil(MongoDB=server, username=username,
password=password, authSource=authSource, db=database,
readPreference=readPreference, replicaSet=replicaSet)
self.collation = Collation(locale='en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
self.client, self.db, self.collection = self.mongo_manager.con_db(collection_str)
self.collection_str = collection_str
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1,
'inchikey': 1, 'smiles': 1, 'name': 1}):
"""Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
"""
result = []
query = {'concentrations': {'$ne': None} }
docs = self.collection.find(filter=query, projection=projection)
for doc in docs:
result.append(doc)
return result
def get_name_by_inchikey(self, inchikey):
"""Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
"""
query = {'inchikey': inchikey}
projection = {'_id': 0, 'name': 1}
doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation)
if doc is None:
return 'No metabolite found.'
else:
return doc['name']
def get_standard_ids_by_id(self, _id):
"""Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
"""
if self.collection_str == 'ecmdb':
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
# projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0}
doc = self.collection.find_one(filter=query)
if doc is None:
return {}
else:
            return doc
| 38.780822 | 140 | 0.579301 | [
"MIT"
] | KarrLab/datanator_query_python | datanator_query_python/query/query_xmdb.py | 2,831 | Python |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
from typing import List, Optional, Union
import torch
from detectron2.config import configurable
from . import detection_utils as utils
from . import transforms as T
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
class DatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
recompute_boxes = cfg.MODEL.MASK_ON
else:
recompute_boxes = False
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"instance_mask_format": cfg.INPUT.MASK_FORMAT,
"use_keypoint": cfg.MODEL.KEYPOINT_ON,
"recompute_boxes": recompute_boxes,
}
if cfg.MODEL.KEYPOINT_ON:
ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
if cfg.MODEL.LOAD_PROPOSALS:
ret["precomputed_proposal_topk"] = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk is not None:
utils.transform_proposals(
dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
)
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
# dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
# the intersection of original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
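# Illustrative usage sketch (not part of the original module), assuming the
# standard detectron2 data-loading helpers:
#
#     from detectron2.data import DatasetMapper, build_detection_train_loader
#     mapper = DatasetMapper(cfg, is_train=True)
#     data_loader = build_detection_train_loader(cfg, mapper=mapper)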
| 43.154255 | 100 | 0.644768 | [
"Apache-2.0"
] | Jerrypiglet/detectron2 | detectron2/data/dataset_mapper.py | 8,113 | Python |
from bs4 import BeautifulSoup
from urllib.request import urlopen
def main():
url = "http://www.networksciencelab.com"
with urlopen(url) as doc:
        soup = BeautifulSoup(doc, 'html.parser')
links = [(link.string, link['href'])
for link in soup.find_all('a')
if link.has_attr('href')
]
# print(links)
broken_links = []
for _, link in links:
try:
urlopen(link, timeout=3)
except Exception as e:
print(f'broken link {link}, {e}')
broken_links.append(link)
print(broken_links)
if __name__ == "__main__":
    main()
| 24.08 | 45 | 0.58804 | [
"MIT"
] | zzragida/study-datascience | data-science-essentials-in-python/broken-link-detector.py | 602 | Python |
#!/usr/bin/env python3.8
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import errno
import json
import os
import shutil
import sys
import tarfile
import tempfile
from functools import total_ordering
@total_ordering
class Part(object):
def __init__(self, json):
self.meta = json['meta']
self.type = json['type']
def __lt__(self, other):
return self.meta < other.meta and self.type < other.type
def __eq__(self, other):
return (
isinstance(other, self.__class__) and self.meta == other.meta and
self.type == other.type)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.meta, self.type))
@contextlib.contextmanager
def _open_archive(archive, directory):
'''Manages a directory in which an existing SDK is laid out.'''
if directory:
yield directory
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
# Extract the tarball into the temporary directory.
# This is vastly more efficient than accessing files one by one via
# the tarfile API.
with tarfile.open(archive) as archive_file:
archive_file.extractall(temp_dir)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set')
@contextlib.contextmanager
def _open_output(archive, directory):
'''Manages the output of this script.'''
if directory:
# Remove any existing output.
shutil.rmtree(directory, ignore_errors=True)
yield directory
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
try:
yield temp_dir
# Write the archive file.
with tarfile.open(archive, "w:gz") as archive_file:
archive_file.add(temp_dir, arcname='')
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set')
def _get_manifest(sdk_dir):
'''Returns the set of elements in the given SDK.'''
with open(os.path.join(sdk_dir, 'meta', 'manifest.json'), 'r') as manifest:
return json.load(manifest)
def _get_meta(element, sdk_dir):
'''Returns the contents of the given element's manifest in a given SDK.'''
with open(os.path.join(sdk_dir, element), 'r') as meta:
return json.load(meta)
def _get_type(element):
'''Returns the SDK element type.'''
# For versioned SDK elements, the type is inside the data field.
if 'schema_id' in element:
return element['data']['type']
return element['type']
def _get_files(element_meta):
'''Extracts the files associated with the given element.
Returns a 2-tuple containing:
- the set of arch-independent files;
- the sets of arch-dependent files, indexed by architecture.
'''
type = _get_type(element_meta)
common_files = set()
arch_files = {}
if type == 'cc_prebuilt_library':
common_files.update(element_meta['headers'])
for arch, binaries in element_meta['binaries'].items():
contents = set()
contents.add(binaries['link'])
if 'dist' in binaries:
contents.add(binaries['dist'])
if 'debug' in binaries:
contents.add(binaries['debug'])
arch_files[arch] = contents
elif type == 'cc_source_library':
common_files.update(element_meta['headers'])
common_files.update(element_meta['sources'])
elif type == 'dart_library':
common_files.update(element_meta['sources'])
elif type == 'fidl_library':
common_files.update(element_meta['sources'])
elif type in ['host_tool', 'companion_host_tool']:
if 'files' in element_meta:
common_files.update(element_meta['files'])
if 'target_files' in element_meta:
arch_files.update(element_meta['target_files'])
elif type == 'loadable_module':
common_files.update(element_meta['resources'])
arch_files.update(element_meta['binaries'])
elif type == 'sysroot':
for arch, version in element_meta['versions'].items():
contents = set()
contents.update(version['headers'])
contents.update(version['link_libs'])
contents.update(version['dist_libs'])
contents.update(version['debug_libs'])
arch_files[arch] = contents
elif type == 'documentation':
common_files.update(element_meta['docs'])
elif type in ('config', 'license', 'component_manifest'):
common_files.update(element_meta['data'])
elif type in ('version_history'):
# These types are pure metadata.
pass
elif type == 'bind_library':
common_files.update(element_meta['sources'])
else:
raise Exception('Unknown element type: ' + type)
return (common_files, arch_files)
def _ensure_directory(path):
'''Ensures that the directory hierarchy of the given path exists.'''
target_dir = os.path.dirname(path)
try:
os.makedirs(target_dir)
except OSError as exception:
if exception.errno == errno.EEXIST and os.path.isdir(target_dir):
pass
else:
raise
def _copy_file(file, source_dir, dest_dir):
'''Copies a file to a given path, taking care of creating directories if
needed.
'''
source = os.path.join(source_dir, file)
destination = os.path.join(dest_dir, file)
_ensure_directory(destination)
shutil.copy2(source, destination)
def _copy_files(files, source_dir, dest_dir):
'''Copies a set of files to a given directory.'''
for file in files:
_copy_file(file, source_dir, dest_dir)
def _copy_identical_files(
set_one, source_dir_one, set_two, source_dir_two, dest_dir):
'''Verifies that two sets of files are absolutely identical and then copies
them to the output directory.
'''
if set_one != set_two:
return False
# Not verifying that the contents of the files are the same, as builds are
# not exactly stable at the moment.
_copy_files(set_one, source_dir_one, dest_dir)
return True
def _copy_element(element, source_dir, dest_dir):
'''Copy an entire SDK element to a given directory.'''
meta = _get_meta(element, source_dir)
common_files, arch_files = _get_files(meta)
files = common_files
for more_files in arch_files.values():
files.update(more_files)
_copy_files(files, source_dir, dest_dir)
# Copy the metadata file as well.
_copy_file(element, source_dir, dest_dir)
def _write_meta(element, source_dir_one, source_dir_two, dest_dir):
'''Writes a meta file for the given element, resulting from the merge of the
meta files for that element in the two given SDK directories.
'''
meta_one = _get_meta(element, source_dir_one)
meta_two = _get_meta(element, source_dir_two)
# TODO(fxbug.dev/5362): verify that the common parts of the metadata files are in
# fact identical.
type = _get_type(meta_one)
meta = {}
if type in ('cc_prebuilt_library', 'loadable_module'):
meta = meta_one
meta['binaries'].update(meta_two['binaries'])
elif type == 'sysroot':
meta = meta_one
meta['versions'].update(meta_two['versions'])
elif type in ['host_tool', 'companion_host_tool']:
meta = meta_one
if not 'target_files' in meta:
meta['target_files'] = {}
if 'target_files' in meta_two:
meta['target_files'].update(meta_two['target_files'])
elif type in ('cc_source_library', 'dart_library', 'fidl_library',
'documentation', 'device_profile', 'config', 'license',
'component_manifest', 'bind_library', 'version_history'):
# These elements are arch-independent, the metadata does not need any
# update.
meta = meta_one
else:
raise Exception('Unknown element type: ' + type)
meta_path = os.path.join(dest_dir, element)
_ensure_directory(meta_path)
with open(meta_path, 'w') as meta_file:
json.dump(
meta, meta_file, indent=2, sort_keys=True, separators=(',', ': '))
return True
def _has_host_content(parts):
'''Returns true if the given list of SDK parts contains an element with
content built for a host.
'''
return 'host_tool' in [part.type for part in parts]
def _write_manifest(source_dir_one, source_dir_two, dest_dir):
'''Writes a manifest file resulting from the merge of the manifest files for
the two given SDK directories.
'''
manifest_one = _get_manifest(source_dir_one)
manifest_two = _get_manifest(source_dir_two)
parts_one = set([Part(p) for p in manifest_one['parts']])
parts_two = set([Part(p) for p in manifest_two['parts']])
manifest = {'arch': {}}
# Schema version.
if manifest_one['schema_version'] != manifest_two['schema_version']:
print('Error: mismatching schema version')
return False
manifest['schema_version'] = manifest_one['schema_version']
# Host architecture.
host_archs = set()
if _has_host_content(parts_one):
host_archs.add(manifest_one['arch']['host'])
if _has_host_content(parts_two):
host_archs.add(manifest_two['arch']['host'])
if not host_archs:
# The archives do not have any host content. The architecture is not
# meaningful in that case but is still needed: just pick one.
host_archs.add(manifest_one['arch']['host'])
if len(host_archs) != 1:
print(
'Error: mismatching host architecture: %s' % ', '.join(host_archs))
return False
manifest['arch']['host'] = list(host_archs)[0]
# Id.
if manifest_one['id'] != manifest_two['id']:
print('Error: mismatching id')
return False
manifest['id'] = manifest_one['id']
# Root.
if manifest_one['root'] != manifest_two['root']:
print('Error: mismatching root')
return False
manifest['root'] = manifest_one['root']
# Target architectures.
manifest['arch']['target'] = sorted(
set(manifest_one['arch']['target']) |
set(manifest_two['arch']['target']))
# Parts.
manifest['parts'] = [vars(p) for p in sorted(parts_one | parts_two)]
manifest_path = os.path.join(dest_dir, 'meta', 'manifest.json')
_ensure_directory(manifest_path)
with open(manifest_path, 'w') as manifest_file:
json.dump(
manifest,
manifest_file,
indent=2,
sort_keys=True,
separators=(',', ': '))
return True
def main():
parser = argparse.ArgumentParser(
description=('Merges the contents of two SDKs'))
first_group = parser.add_mutually_exclusive_group(required=True)
first_group.add_argument(
'--first-archive',
help='Path to the first SDK - as an archive',
default='')
first_group.add_argument(
'--first-directory',
help='Path to the first SDK - as a directory',
default='')
second_group = parser.add_mutually_exclusive_group(required=True)
second_group.add_argument(
'--second-archive',
help='Path to the second SDK - as an archive',
default='')
second_group.add_argument(
'--second-directory',
help='Path to the second SDK - as a directory',
default='')
output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument(
'--output-archive',
help='Path to the merged SDK - as an archive',
default='')
output_group.add_argument(
'--output-directory',
help='Path to the merged SDK - as a directory',
default='')
args = parser.parse_args()
has_errors = False
with _open_archive(args.first_archive, args.first_directory) as first_dir, \
_open_archive(args.second_archive, args.second_directory) as second_dir, \
_open_output(args.output_archive, args.output_directory) as out_dir:
first_elements = set(
[Part(p) for p in _get_manifest(first_dir)['parts']])
second_elements = set(
[Part(p) for p in _get_manifest(second_dir)['parts']])
common_elements = first_elements & second_elements
# Copy elements that appear in a single SDK.
for element in sorted(first_elements - common_elements):
_copy_element(element.meta, first_dir, out_dir)
for element in (second_elements - common_elements):
_copy_element(element.meta, second_dir, out_dir)
# Verify and merge elements which are common to both SDKs.
for raw_element in sorted(common_elements):
element = raw_element.meta
first_meta = _get_meta(element, first_dir)
second_meta = _get_meta(element, second_dir)
first_common, first_arch = _get_files(first_meta)
second_common, second_arch = _get_files(second_meta)
# Common files should not vary.
if not _copy_identical_files(first_common, first_dir, second_common,
second_dir, out_dir):
print('Error: different common files for %s' % (element))
has_errors = True
continue
# Arch-dependent files need to be merged in the metadata.
all_arches = set(first_arch.keys()) | set(second_arch.keys())
for arch in all_arches:
if arch in first_arch and arch in second_arch:
if not _copy_identical_files(first_arch[arch], first_dir,
second_arch[arch], second_dir,
out_dir):
print(
'Error: different %s files for %s' %
(arch, element))
has_errors = True
continue
elif arch in first_arch:
_copy_files(first_arch[arch], first_dir, out_dir)
elif arch in second_arch:
_copy_files(second_arch[arch], second_dir, out_dir)
if not _write_meta(element, first_dir, second_dir, out_dir):
print('Error: unable to merge meta for %s' % (element))
has_errors = True
if not _write_manifest(first_dir, second_dir, out_dir):
print('Error: could not write manifest file')
has_errors = True
# TODO(fxbug.dev/5362): verify that metadata files are valid.
return 1 if has_errors else 0
if __name__ == '__main__':
sys.exit(main())
| 36.091127 | 85 | 0.634485 | [
"BSD-2-Clause"
] | allansrc/fuchsia | scripts/sdk/merger/merge.py | 15,050 | Python |
def sort(arr):
for i in range(1, len(arr)):
key = arr[i]
j = i-1
while j >=0 and key < arr[j] :
arr[j+1] = arr[j]
j -= 1
arr[j+1] = key
return arr
arr = list(map(int,input("Enter Numbers: ").split()))
print(sort(arr))
| 24.5 | 53 | 0.442177 | [
"MIT"
] | Aashutosh-922/Data-Structures-And-Algorithms | sorting/python/insertion-sort.py | 294 | Python |
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
# Connect to DynamoDB in us-west-2 and create the table on that connection.
connection = boto.dynamodb2.connect_to_region('us-west-2')
users = Table.create(
    'users',
    schema=[
        HashKey('username'),  # defaults to STRING data_type
    ],
    throughput={
        'read': 5,
        'write': 15,
    },
    connection=connection,
)
# Put a sample item into the newly created table.
users.put_item(data={"username": "user"})
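# Illustrative follow-up (not part of the original script): once the table has
# become ACTIVE, the item can be read back by its hash key.
item = users.get_item(username="user")
print(item["username"])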
| 28.571429 | 56 | 0.72 | [
"Apache-2.0"
] | samvarankashyap/amazondynamodb | samplescripts/create_table.py | 400 | Python |
"""
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executing via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { \"name\": \"string\", \"slots\": 0, \"occupied_slots\": 0, \"used_slots\": 0, \"queued_slots\": 0, \"open_slots\": 0 } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. 
Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. # Summary of Changes | Airflow version | Description | |-|-| | v2.0 | Initial release | | v2.0.2 | Added /plugins endpoint | | v2.1 | New providers endpoint | # Trying the API You can use a third party airflow_client.client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest airflow_client.client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X PATCH 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\ -H 'Content-Type: application/json' \\ --user \"username:password\" \\ -d '{ \"is_paused\": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. ## Enabling CORS [Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a browser security feature that restricts HTTP requests that are initiated from scripts running in the browser. For details on enabling/configuring CORS, see [Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backend` command as in the example below. ```bash $ airflow config get-value api auth_backend airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your airflow_client.client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. 
## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a airflow_client.client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This airflow_client.client error response indicates that the server cannot find the requested resource. ## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. ## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, e.g. the resource it tries to create already exists. ## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import airflow_client.client
from airflow_client.client.model.dag import DAG
globals()['DAG'] = DAG
from airflow_client.client.model.dag_collection_all_of import DAGCollectionAllOf
class TestDAGCollectionAllOf(unittest.TestCase):
"""DAGCollectionAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDAGCollectionAllOf(self):
"""Test DAGCollectionAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = DAGCollectionAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 230.589744 | 8,173 | 0.759813 | [
"Apache-2.0"
] | sptsakcg/airflow-client-python | airflow_client/test/test_dag_collection_all_of.py | 8,993 | Python |
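# Alternative sketch (not part of the original exercise): the banknote
# breakdown computed by the withdrawal loop below can also be derived with
# divmod over the note values. Defined here only for illustration; nothing
# calls it automatically.
def cedulas(valor, notas=(50, 20, 10, 1)):
    """Return a dict mapping each note value to how many notes are needed."""
    resultado = {}
    for nota in notas:
        quantidade, valor = divmod(valor, nota)
        if quantidade:
            resultado[nota] = quantidade
    return resultado
# Example: cedulas(186) == {50: 3, 20: 1, 10: 1, 1: 6}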
print("=="*20)
print(f' Banco dev')
print("=="*20)
sac = float(input('Qual o valor voce quer sacar?R$ '))
total = sac
ced = 50
totced = 0
while True:
if total >=ced:
total -= ced
totced += 1
else:
print(f'Total de {totced} cedulas de R${ced}')
if ced == 50:
ced = 20
elif ced == 20:
ced = 10
elif ced == 10:
ced = 1
totced = 0
if total == 0:
break | 21.304348 | 55 | 0.436735 | [
"MIT"
] | lucasohara98/Python_CursoemVideo | PythonExecicios/ex071.py | 490 | Python |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
from __future__ import absolute_import
from uuid import uuid1
from datetime import datetime
import pytest
import pytz
from pycoin.key.BIP32Node import BIP32Node
from transactions import Transactions
from transactions.services.daemonservice import BitcoinDaemonService
def test_blockchainspider_init(rpcuser, rpcpassword, host, port):
from spool.spoolex import BlockchainSpider
blockchain_spider = BlockchainSpider(
testnet=True,
service='daemon',
username=rpcuser,
password=rpcpassword,
host=host,
port=port,
)
assert isinstance(blockchain_spider._t, Transactions)
assert blockchain_spider._t.testnet is True
assert blockchain_spider._t._service._username == rpcuser
assert blockchain_spider._t._service._password == rpcpassword
assert blockchain_spider._t._service._host == host
assert blockchain_spider._t._service._port == port
assert isinstance(blockchain_spider._t._service, BitcoinDaemonService)
@pytest.mark.usefixtures('init_blockchain')
def test_check_script(rpconn, piece_hashes, spool_regtest, transactions):
"""
Test :staticmethod:`check_script`.
    Args:
        rpconn (AuthServiceProxy): JSON-RPC connection
            (:class:`AuthServiceProxy` instance) to bitcoin regtest
        piece_hashes: hashes of the piece being transferred
        spool_regtest (Spool): :class:`Spool` instance wired to the
            bitcoin regtest node
        transactions (Transactions): :class:`Transactions` instance to
            communicate to the bitcoin regtest node
"""
from spool import Spool
from spool.spoolex import BlockchainSpider
sender_password = uuid1().hex.encode('utf-8')
sender_wallet = BIP32Node.from_master_secret(sender_password,
netcode='XTN')
sender_address = sender_wallet.bitcoin_address()
rpconn.importaddress(sender_address)
rpconn.sendtoaddress(sender_address, Spool.FEE/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.generate(1)
receiver_address = rpconn.getnewaddress()
# TODO do not rely on Spool
txid = spool_regtest.transfer(
('', sender_address),
receiver_address,
piece_hashes,
sender_password,
5,
min_confirmations=1,
)
verb = BlockchainSpider.check_script(transactions.get(txid)['vouts'])
assert verb == b'ASCRIBESPOOL01TRANSFER5'
@pytest.mark.usefixtures('init_blockchain')
def test_check_script_with_invalid_tx(eve, wendy, rpconn, transactions):
"""
An invalid transaction in this context is one that does not contain a
``vout`` for which the ``hex`` is a valid ``Spool`` verb.
    Args:
        eve (str): bitcoin address of eve, the sender
        wendy (str): bitcoin address of wendy, the receiver
        rpconn (AuthServiceProxy): JSON-RPC connection
            (:class:`AuthServiceProxy` instance) to a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
"""
from spool.spoolex import BlockchainSpider
rpconn.sendtoaddress(eve, 2)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 1)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(Exception) as exc:
BlockchainSpider.check_script(decoded_raw_transfer_tx['vouts'])
assert exc.value.args[0] == 'Invalid ascribe transaction'
@pytest.mark.usefixtures('init_blockchain')
def test_get_addresses(rpconn, piece_hashes, spool_regtest, transactions):
from spool import Spool
from spool.spoolex import BlockchainSpider
sender_password = uuid1().hex.encode('utf-8')
sender_wallet = BIP32Node.from_master_secret(sender_password,
netcode='XTN')
sender_address = sender_wallet.bitcoin_address()
rpconn.importaddress(sender_address)
rpconn.sendtoaddress(sender_address, Spool.FEE/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.sendtoaddress(sender_address, Spool.TOKEN/100000000)
rpconn.generate(1)
receiver_address = rpconn.getnewaddress()
# TODO do not rely on Spool
txid = spool_regtest.transfer(
('', sender_address),
receiver_address,
piece_hashes,
sender_password,
5,
min_confirmations=1,
)
decoded_raw_transfer_tx = transactions.get(txid)
addresses = BlockchainSpider._get_addresses(decoded_raw_transfer_tx)
assert len(addresses) == 3
assert addresses[0] == sender_address
assert addresses[1] == receiver_address
assert addresses[2] == piece_hashes[0]
@pytest.mark.usefixtures('init_blockchain')
def test_get_addresses_with_invalid_tx(eve, wendy, rpconn, transactions):
"""
An invalid transaction in this context is one that has inputs from
different addresses.
    Args:
        eve (str): bitcoin address of eve, the sender
        wendy (str): bitcoin address of wendy, the receiver
        rpconn (AuthServiceProxy): JSON-RPC connection
            (:class:`AuthServiceProxy` instance) to a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node
"""
from spool.spoolex import BlockchainSpider, InvalidTransactionError
rpconn.sendtoaddress(eve, 1)
rpconn.sendtoaddress(eve, 1)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 2)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(InvalidTransactionError) as exc:
BlockchainSpider._get_addresses(decoded_raw_transfer_tx)
assert isinstance(exc.value, InvalidTransactionError)
def test_decode_op_return():
from spool.spoolex import BlockchainSpider
op_return_hex = '6a174153435249424553504f4f4c30315452414e5346455235'
op_return = BlockchainSpider.decode_op_return(op_return_hex)
assert op_return == b'ASCRIBESPOOL01TRANSFER5'
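# Illustrative addition (not in the original suite): a standard-library-only
# check of what decode_op_return strips from the script above -- the 0x6a
# OP_RETURN opcode and the 0x17 push-length byte (23, the length of the ASCII
# Spool verb that follows).
def test_decode_op_return_manual_sketch():
    import binascii
    op_return_hex = '6a174153435249424553504f4f4c30315452414e5346455235'
    raw = binascii.unhexlify(op_return_hex)
    assert raw[:1] == b'\x6a'  # OP_RETURN opcode
    assert raw[1:2] == b'\x17'  # push of the next 23 bytes
    assert raw[2:] == b'ASCRIBESPOOL01TRANSFER5'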
def test_get_time_utc():
from spool.spoolex import BlockchainSpider, TIME_FORMAT
time = '2016-06-13T17:28:03 UTC'
timestamp = BlockchainSpider._get_time_utc(time)
assert timestamp
assert datetime.fromtimestamp(timestamp,
tz=pytz.UTC).strftime(TIME_FORMAT) == time
def test_simplest_history(federation, alice, piece_hashes,
spool_regtest, spider, rpconn):
txid = spool_regtest.register_piece(
('', federation),
alice,
piece_hashes,
b'federation-secret',
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hashes[0])
assert len(history) == 1
assert '' in history
assert len(history['']) == 1
piece_registration_data = history[''][0]
assert piece_registration_data['action'] == 'PIECE'
assert piece_registration_data['edition_number'] == ''
assert piece_registration_data['from_address'] == federation
assert piece_registration_data['number_editions'] == 0
assert piece_registration_data['piece_address'] == piece_hashes[0]
assert piece_registration_data['timestamp_utc']
assert piece_registration_data['to_address'] == alice
assert piece_registration_data['txid'] == txid
assert piece_registration_data['verb'] == b'ASCRIBESPOOL01PIECE'
def test_register_editions_qty_history(federation,
alice,
registered_piece_hashes,
spool_regtest,
spider,
rpconn):
txid = spool_regtest.editions(
('', federation),
alice,
registered_piece_hashes,
b'federation-secret',
3,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(registered_piece_hashes[0])
assert len(history) == 2
assert '' in history
assert 0 in history
assert len(history['']) == 1
assert len(history[0]) == 1
editions_data = history[0][0]
assert editions_data['action'] == 'EDITIONS'
assert editions_data['edition_number'] == 0
assert editions_data['from_address'] == federation
assert editions_data['number_editions'] == 3
assert editions_data['piece_address'] == registered_piece_hashes[0]
assert editions_data['timestamp_utc']
assert editions_data['to_address'] == alice
assert editions_data['txid'] == txid
assert editions_data['verb'] == b'ASCRIBESPOOL01EDITIONS3'
def test_register_edition_history(federation, alice, spool_regtest, spider,
registered_edition_qty_hashes, rpconn):
edition_number = 2
piece_hash = registered_edition_qty_hashes[0]
txid = spool_regtest.register(
('', federation),
alice,
registered_edition_qty_hashes,
b'federation-secret',
edition_number,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hash)
assert len(history) == 3
assert '' in history
assert 0 in history
assert edition_number in history
assert len(history['']) == 1
assert len(history[0]) == 1
assert len(history[edition_number]) == 1
edition_registration_data = history[edition_number][0]
assert edition_registration_data['action'] == 'REGISTER'
assert edition_registration_data['edition_number'] == edition_number
assert edition_registration_data['from_address'] == federation
assert edition_registration_data['number_editions'] == 3
assert edition_registration_data['piece_address'] == piece_hash
assert edition_registration_data['timestamp_utc']
assert edition_registration_data['to_address'] == alice
assert edition_registration_data['txid'] == txid
assert edition_registration_data['verb'] == b'ASCRIBESPOOL01REGISTER2'
def test_transfer_history(federation, alice, bob, spool_regtest, spider,
registered_edition_two_hashes, rpconn):
from .conftest import reload_address
reload_address(alice, rpconn)
edition_number = 2
piece_hash = registered_edition_two_hashes[0]
txid = spool_regtest.transfer(
('', alice),
bob,
registered_edition_two_hashes,
b'alice-secret',
edition_number,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hash)
assert len(history) == 3
assert '' in history
assert 0 in history
assert edition_number in history
assert len(history['']) == 1
assert len(history[0]) == 1
assert len(history[edition_number]) == 2
transfer_data = history[edition_number][1]
assert transfer_data['action'] == 'TRANSFER'
assert transfer_data['edition_number'] == edition_number
assert transfer_data['from_address'] == alice
assert transfer_data['number_editions'] == 3
assert transfer_data['piece_address'] == piece_hash
assert transfer_data['timestamp_utc']
assert transfer_data['to_address'] == bob
assert transfer_data['txid'] == txid
assert transfer_data['verb'] == b'ASCRIBESPOOL01TRANSFER2'
def test_loan_history(federation, bob, carol, spool_regtest, spider,
transferred_edition_two_hashes, rpconn):
from .conftest import reload_address
edition_number = 2
loan_start, loan_end = '171017', '181018'
piece_hash = transferred_edition_two_hashes[0]
reload_address(bob, rpconn)
txid = spool_regtest.loan(
('', bob),
carol,
transferred_edition_two_hashes,
b'bob-secret',
2,
loan_start,
loan_end,
min_confirmations=1,
)
rpconn.generate(1)
history = spider.history(piece_hash)
assert len(history) == 3
assert '' in history
assert 0 in history
assert edition_number in history
assert len(history['']) == 1
assert len(history[0]) == 1
assert len(history[edition_number]) == 3
loan_data = history[edition_number][2]
assert loan_data['action'] == 'LOAN'
assert loan_data['edition_number'] == edition_number
assert loan_data['from_address'] == bob
assert loan_data['number_editions'] == 3
assert loan_data['piece_address'] == piece_hash
assert loan_data['timestamp_utc']
assert loan_data['to_address'] == carol
assert loan_data['txid'] == txid
assert loan_data['verb'] == b'ASCRIBESPOOL01LOAN2/171017181018'
def test_chain(loaned_edition_two_hashes, spider):
from spool import BlockchainSpider
history = spider.history(loaned_edition_two_hashes[0])
chain = BlockchainSpider.chain(history, 2)
assert len(chain) == 3
assert chain[0]['action'] == 'REGISTER'
assert chain[1]['action'] == 'TRANSFER'
assert chain[2]['action'] == 'LOAN'
assert chain[0]['edition_number'] == 2
assert chain[1]['edition_number'] == 2
assert chain[2]['edition_number'] == 2
def test_strip_loan(loaned_edition_two_hashes, spider):
from spool import BlockchainSpider
history = spider.history(loaned_edition_two_hashes[0])
chain = BlockchainSpider.chain(history, 2)
assert len(chain) == 3
assert 'LOAN' in (tx['action'] for tx in chain)
chain = BlockchainSpider.strip_loan(chain)
assert len(chain) == 2
assert 'LOAN' not in (tx['action'] for tx in chain)
def test_pprint(transferred_edition_two_hashes, spider):
from spool import BlockchainSpider
history = spider.history(transferred_edition_two_hashes[0])
BlockchainSpider.pprint(history)
| 37.608696 | 76 | 0.69328 | [
"Apache-2.0"
] | ascribe/pyspool | tests/test_spoolex.py | 13,840 | Python |
#!/usr/bin/python3
import time
from flask import url_for
from urllib.request import urlopen
from . util import set_original_response, set_modified_response, live_server_setup
sleep_time_for_fetch_thread = 3
# Basic test to check that inscriptis is not adding extra newline characters and that it generally works
def test_inscriptus():
from inscriptis import get_text
html_content="<html><body>test!<br/>ok man</body></html>"
stripped_text_from_html = get_text(html_content)
assert stripped_text_from_html == 'test!\nok man'
def test_check_basic_change_detection_functionality(client, live_server):
set_original_response()
live_server_setup(live_server)
# Add our URL to the import page
res = client.post(
url_for("import_page"),
data={"urls": url_for('test_endpoint', _external=True)},
follow_redirects=True
)
assert b"1 Imported" in res.data
time.sleep(sleep_time_for_fetch_thread)
    # Do this a few times to ensure we don't accidentally set the status
for n in range(3):
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread)
# It should report nothing found (no new 'unviewed' class)
res = client.get(url_for("index"))
assert b'unviewed' not in res.data
assert b'test-endpoint' in res.data
# Default no password set, this stuff should be always available.
assert b"SETTINGS" in res.data
assert b"BACKUP" in res.data
assert b"IMPORT" in res.data
#####################
    # Check that the HTML conversion was detected and worked
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
    # Check that this class does not appear (i.e. that we didn't see the actual source)
assert b'foobar-detection' not in res.data
# Make a change
set_modified_response()
res = urlopen(url_for('test_endpoint', _external=True))
assert b'which has this one new line' in res.read()
# Force recheck
res = client.get(url_for("api_watch_checknow"), follow_redirects=True)
assert b'1 watches are queued for rechecking.' in res.data
time.sleep(sleep_time_for_fetch_thread)
# Now something should be ready, indicated by having a 'unviewed' class
res = client.get(url_for("index"))
assert b'unviewed' in res.data
# #75, and it should be in the RSS feed
res = client.get(url_for("rss"))
expected_url = url_for('test_endpoint', _external=True)
assert b'<rss' in res.data
# re #16 should have the diff in here too
assert b'(into ) which has this one new line' in res.data
assert b'CDATA' in res.data
assert expected_url.encode('utf-8') in res.data
# Following the 'diff' link, it should no longer display as 'unviewed' even after we recheck it a few times
res = client.get(url_for("diff_history_page", uuid="first"))
assert b'Compare newest' in res.data
time.sleep(2)
    # Do this a few times to ensure we don't accidentally set the status
for n in range(2):
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread)
# It should report nothing found (no new 'unviewed' class)
res = client.get(url_for("index"))
assert b'unviewed' not in res.data
assert b'head title' not in res.data # Should not be present because this is off by default
assert b'test-endpoint' in res.data
set_original_response()
# Enable auto pickup of <title> in settings
res = client.post(
url_for("settings_page"),
data={"extract_title_as_title": "1", "minutes_between_check": 180, 'fetch_backend': "html_requests"},
follow_redirects=True
)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(sleep_time_for_fetch_thread)
res = client.get(url_for("index"))
assert b'unviewed' in res.data
# It should have picked up the <title>
assert b'head title' in res.data
#
# Cleanup everything
res = client.get(url_for("api_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data
| 33.170543 | 111 | 0.685441 | [
"Apache-2.0"
] | shaikhspeare/changedetection.io | changedetectionio/tests/test_backend.py | 4,279 | Python |
# -*- coding: utf-8 -*-
"""
kay.ext.gaema.urls
:Copyright: (c) 2009 Takashi Matsuo <[email protected]>
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from kay.routing import (
ViewGroup, Rule
)
view_groups = [
ViewGroup(
Rule('/login/<service>', endpoint='login',
view='kay.ext.gaema.views.login'),
Rule('/logout/<service>', endpoint='logout',
view='kay.ext.gaema.views.logout'),
Rule('/marketplace_login/a/<domain>', endpoint='marketplace_login',
view='kay.ext.gaema.views.marketplace_login'),
Rule('/marketplace_logout/<domain>', endpoint='marketplace_logout',
view='kay.ext.gaema.views.marketplace_logout'),
Rule('/select_service/<targets>', endpoint='select_service',
view='kay.ext.gaema.views.select_service'),
)
]
| 28.931034 | 71 | 0.649583 | [
"Apache-2.0",
"BSD-3-Clause"
] | IanLewis/kay | kay/ext/gaema/urls.py | 839 | Python |
from django.db import models
class Rice(models.Model):
state_name=models.CharField("state_name",max_length=255)
distict_name=models.CharField("distict_name",max_length=255)
crop_year=models.IntegerField("crop_year")
season=models.CharField("season",max_length=255)
crop=models.CharField("crop",max_length=255)
temperature=models.FloatField("temperature")
precipitation=models.FloatField("precipitation")
humidity=models.IntegerField("humidity")
area=models.IntegerField("area")
production=models.FloatField("production") | 43.153846 | 64 | 0.770053 | [
"MIT"
] | blitz-cmd/Rice-Crop-Yield-Prediction | Rice_Crop_Yield_Prediction/models.py | 561 | Python |
"""
Asyncio using Asyncio.Task to execute three math functions in parallel
"""
import asyncio
@asyncio.coroutine
def factorial(number):
f = 1
for i in range(2, number+1):
print("Asyncio.Task: Compute factorial(%s)" % (i))
yield from asyncio.sleep(1)
f *= i
print("Asyncio.Task - factorial(%s) = %s" % (number, f))
@asyncio.coroutine
def fibonacci(number):
a, b = 0, 1
for i in range(number):
print("Asyncio.Task: Compute fibonacci (%s)" % (i))
yield from asyncio.sleep(1)
a, b = b, a + b
print("Asyncio.Task - fibonacci(%s) = %s" % (number, a))
@asyncio.coroutine
def binomialCoeff(n, k):
result = 1
for i in range(1, k+1):
result = result * (n-i+1) / i
print("Asyncio.Task: Compute binomialCoeff (%s)" % (i))
yield from asyncio.sleep(1)
print("Asyncio.Task - binomialCoeff(%s , %s) = %s" % (n,k,result))
if __name__ == "__main__":
tasks = [asyncio.Task(factorial(10)),
asyncio.Task(fibonacci(10)),
asyncio.Task(binomialCoeff(20,10))]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
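# Hedged modern equivalent (not part of the original recipe): the generator-based
# @asyncio.coroutine / "yield from" style used above is deprecated and was removed
# in Python 3.11, so the same pattern is written today with async/await and
# asyncio.gather. Nothing below runs automatically; on Python 3.7+ start it with
# asyncio.run(modern_main()).
async def modern_factorial(number):
    f = 1
    for i in range(2, number + 1):
        print("Asyncio.Task: Compute factorial(%s)" % (i))
        await asyncio.sleep(1)
        f *= i
    print("Asyncio.Task - factorial(%s) = %s" % (number, f))
async def modern_main():
    # fibonacci and binomialCoeff convert the same way: replace the decorator
    # with "async def" and "yield from" with "await".
    await asyncio.gather(modern_factorial(10), modern_factorial(4))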
| 29.45 | 70 | 0.60017 | [
"MIT"
] | jsdnhk/python-parallel-programming-cookbook-code | Chapter 4/asyncio_Task.py | 1,178 | Python |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from dataclasses import dataclass
from textwrap import dedent
from typing import Dict
from pants.engine.fs import FileContent
from pants.option.config import Config, TomlSerializer
@dataclass(frozen=True)
class ConfigFile:
content: str
default_values: Dict
expected_options: Dict
FILE_1 = ConfigFile(
content=dedent(
"""
[DEFAULT]
name = "%(env.NAME)s"
answer = 42
scale = 1.2
path = "/a/b/%(answer)s"
embed = "%(path)s::%(name)s"
disclaimer = '''
Let it be known
that.'''
[a]
        # TODO: once TOML releases its new version with support for heterogeneous lists, we should be
        # able to rewrite this to `[1, 2, 3, "%(answer)s"]`. See
# https://github.com/toml-lang/toml/issues/665.
list = ["1", "2", "3", "%(answer)s"]
list2.add = [7, 8, 9]
list3.remove = ["x", "y", "z"]
[b]
preempt = true
[c]
name = "overridden_from_default"
interpolated_from_section = "%(name)s is interpolated"
recursively_interpolated_from_section = "%(interpolated_from_section)s (again)"
[d.dict_val]
# Make sure we don't misinterpret `add` and `remove` as list options.
add = 0
remove = 0
nested = { nested_key = 'foo' }
[list_merging]
list1 = []
list2 = [1, 2]
list3.add = [3, 4]
list4.remove = [5]
list5 = [6, 7]
"""
),
default_values={
"name": "foo",
"answer": 42,
"scale": 1.2,
"path": "/a/b/42",
"embed": "/a/b/42::foo",
"disclaimer": "Let it be known\nthat.",
},
expected_options={
"a": {"list": '["1", "2", "3", "42"]', "list2": "+[7, 8, 9]", "list3": '-["x", "y", "z"]'},
"b": {"preempt": "True"},
"c": {
"name": "overridden_from_default",
"interpolated_from_section": "overridden_from_default is interpolated",
"recursively_interpolated_from_section": "overridden_from_default is interpolated (again)",
},
"d": {"dict_val": "{'add': 0, 'remove': 0, 'nested': {'nested_key': 'foo'}"},
"list_merging": {
"list1": "[]",
"list2": "[1, 2]",
"list3": "+[3, 4]",
"list4": "-[5]",
"list5": "[6, 7]",
},
},
)
FILE_2 = ConfigFile(
content=dedent(
"""
[a]
fast = true
[b]
preempt = false
[d]
list.add = [0, 1]
list.remove = [8, 9]
[empty_section]
[list_merging]
list1 = [11, 22]
list2.add = [33]
list3.add = [8, 9]
list3.remove = [4, 55]
list4 = [66]
list6.add = [77, 88]
"""
),
default_values={},
expected_options={
"a": {"fast": "True"},
"b": {"preempt": "False"},
"d": {"list": "+[0, 1],-[8, 9]"},
"empty_section": {},
"list_merging": {
"list1": "[11, 22]",
"list2": "+[33]",
"list3": "+[8, 9],-[4, 55]",
"list4": "[66]",
"list6": "+[77, 88]",
},
},
)
def _setup_config() -> Config:
parsed_config = Config.load(
file_contents=[
FileContent("file1.toml", FILE_1.content.encode()),
FileContent("file2.toml", FILE_2.content.encode()),
],
seed_values={"buildroot": "fake_buildroot"},
env={"NAME": "foo"},
)
assert ["file1.toml", "file2.toml"] == parsed_config.sources()
return parsed_config
class ConfigTest(unittest.TestCase):
def setUp(self) -> None:
self.config = _setup_config()
self.default_seed_values = Config._determine_seed_values(
seed_values={"buildroot": "fake_buildroot"},
env={"NAME": "foo"},
)
self.expected_combined_values: dict[str, dict[str, list[str]]] = {
"a": {
"list": ['["1", "2", "3", "42"]'],
"list2": ["+[7, 8, 9]"],
"list3": ['-["x", "y", "z"]'],
"fast": ["True"],
},
"b": {"preempt": ["True", "False"]},
"c": {
"name": ["overridden_from_default"],
"interpolated_from_section": ["overridden_from_default is interpolated"],
"recursively_interpolated_from_section": [
"overridden_from_default is interpolated (again)"
],
},
"d": {
"dict_val": ["{'add': 0, 'remove': 0, 'nested': {'nested_key': 'foo'}}"],
"list": ["+[0, 1],-[8, 9]"],
},
"empty_section": {},
"list_merging": {
"list1": ["[]", "[11, 22]"],
"list2": ["[1, 2]", "+[33]"],
"list3": ["+[3, 4]", "+[8, 9],-[4, 55]"],
"list4": ["-[5]", "[66]"],
"list5": ["[6, 7]"],
"list6": ["+[77, 88]"],
},
}
def test_default_values(self) -> None:
# This is used in `options_bootstrapper.py` to ignore default values when validating options.
file1_values = self.config.values[0]
file2_values = self.config.values[1]
# NB: string interpolation should only happen when calling _ConfigValues.get_value(). The
# values for _ConfigValues.defaults are not yet interpolated.
default_file1_values_unexpanded = {
**FILE_1.default_values,
"name": "%(env.NAME)s",
"path": "/a/b/%(answer)s",
"embed": "%(path)s::%(name)s",
}
assert file1_values.defaults == {
**self.default_seed_values,
**default_file1_values_unexpanded,
}
assert file2_values.defaults == self.default_seed_values
def test_get(self) -> None:
# Check the DEFAULT section
# N.B.: All values read from config files are read as str and only later converted by the
# options parser to the expected destination type; so we ensure we're comparing strings
# here.
for option, value in self.default_seed_values.items():
# Both config files have the seed values.
assert self.config.get(section="DEFAULT", option=option) == [str(value), str(value)]
for option, value in FILE_1.default_values.items():
# Only FILE_1 has explicit DEFAULT values.
assert self.config.get(section="DEFAULT", option=option) == [str(value)]
# Check the combined values.
for section, section_values in self.expected_combined_values.items():
for option, value_list in section_values.items():
assert self.config.get(section=section, option=option) == value_list
def test_empty(self) -> None:
config = Config.load([])
assert config.sources() == []
def test_toml_serializer() -> None:
original_values: Dict = {
"GLOBAL": {
"truthy": True,
"falsy": False,
"int": 0,
"float": 0.0,
"word": "hello there",
"listy": ["a", "b", "c"],
"map": {"a": 0, "b": 1},
},
"some-subsystem": {"o": ""},
}
assert TomlSerializer(original_values).normalize() == {
"GLOBAL": {**original_values["GLOBAL"], "map": "{'a': 0, 'b': 1}"},
"some-subsystem": {"o": ""},
}
def test_toml_serializer_list_add_remove() -> None:
original_values = {"GLOBAL": {"backend_packages.add": ["added"]}}
assert TomlSerializer(original_values).normalize() == { # type: ignore[arg-type]
"GLOBAL": {"backend_packages": "+['added']"}
}
| 32.158537 | 103 | 0.506763 | [
"Apache-2.0"
] | wonlay/pants | src/python/pants/option/config_test.py | 7,911 | Python |
import contextlib
import logging
from cStringIO import StringIO
from teuthology import misc
from teuthology.job_status import set_status
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def syslog(ctx, config):
"""
start syslog / stop syslog on exit.
"""
if ctx.archive is None:
# disable this whole feature if we're not going to archive the data
# anyway
yield
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
log_dir = '{adir}/syslog'.format(adir=archive_dir)
run.wait(
ctx.cluster.run(
args=['mkdir', '-p', '-m0755', '--', log_dir],
wait=False,
)
)
CONF = '/etc/rsyslog.d/80-cephtest.conf'
kern_log = '{log_dir}/kern.log'.format(log_dir=log_dir)
misc_log = '{log_dir}/misc.log'.format(log_dir=log_dir)
conf_lines = [
'kern.* -{kern_log};RSYSLOG_FileFormat'.format(kern_log=kern_log),
'*.*;kern.none -{misc_log};RSYSLOG_FileFormat'.format(
misc_log=misc_log),
]
conf_fp = StringIO('\n'.join(conf_lines))
try:
for rem in ctx.cluster.remotes.iterkeys():
log_context = 'system_u:object_r:var_log_t:s0'
for log_path in (kern_log, misc_log):
rem.run(args='touch %s' % log_path)
rem.chcon(log_path, log_context)
misc.sudo_write_file(
remote=rem,
path=CONF,
data=conf_fp,
)
conf_fp.seek(0)
run.wait(
ctx.cluster.run(
args=[
'sudo',
'service',
# a mere reload (SIGHUP) doesn't seem to make
# rsyslog open the files
'rsyslog',
'restart',
],
wait=False,
),
)
yield
finally:
log.info('Shutting down syslog monitoring...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
CONF,
run.Raw('&&'),
'sudo',
'service',
'rsyslog',
'restart',
],
wait=False,
),
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(
args=[
'egrep', '--binary-files=text',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
'grep', '-v', 'lockdep is turned off',
run.Raw('|'),
'grep', '-v', 'trying to register non-static key',
run.Raw('|'),
'grep', '-v', 'DEBUG: fsize', # xfs_fsr
run.Raw('|'),
'grep', '-v', 'CRON', # ignore cron noise
run.Raw('|'),
'grep', '-v', 'BUG: bad unlock balance detected', # #6097
run.Raw('|'),
'grep', '-v', 'inconsistent lock state', # FIXME see #2523
run.Raw('|'),
'grep', '-v', '*** DEADLOCK ***', # part of lockdep output
run.Raw('|'),
'grep', '-v',
# FIXME see #2590 and #147
'INFO: possible irq lock inversion dependency detected',
run.Raw('|'),
'grep', '-v',
'INFO: NMI handler (perf_event_nmi_handler) took too long to run', # noqa
run.Raw('|'),
'grep', '-v', 'INFO: recovery required on readonly',
run.Raw('|'),
'grep', '-v', 'ceph-create-keys: INFO',
run.Raw('|'),
'egrep', '-v', '\\bsalt-master\\b|\\bsalt-minion\\b|\\bsalt-api\\b',
run.Raw('|'),
'head', '-n', '1',
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != '':
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
"'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(
ctx.cluster.run(
args=[
'find',
'{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
| 33.789157 | 94 | 0.415404 | [
"MIT"
] | dzedro/teuthology | teuthology/task/internal/syslog.py | 5,609 | Python |
#https://www.codechef.com/problems/DECINC
n = int(input())
if(n%4==0):
print(n+1)
else:
print(n-1) | 17.666667 | 41 | 0.613208 | [
"Unlicense"
] | 27Anurag/Competitive-programing-hacktoberfest-2021 | CodeChef/DECINC_Decrement OR Increment.py | 106 | Python |
r"""Distributed TensorFlow with Monitored Training Session.
This implements the 1a image recognition benchmark task, see https://mlbench.readthedocs.io/en/latest/benchmark-tasks.html#a-image-classification-resnet-cifar-10
for more details
Adapted from official tutorial::
https://www.tensorflow.org/deploy/distributed
Launch::
mpirun -n 3 --allow-run-as-root python ....
"""
import argparse
import logging
import os
import tensorflow as tf
from mlbench_core.controlflow.tensorflow.train_validation import train_round, \
validation_round
from mlbench_core.dataset.imagerecognition.tensorflow.cifar10 import \
DatasetCifar
from mlbench_core.evaluation.goals import task1_time_to_accuracy_light_goal, \
task1_time_to_accuracy_goal
from mlbench_core.evaluation.tensorflow.criterion import \
softmax_cross_entropy_with_logits_v2_l2_regularized
from mlbench_core.evaluation.tensorflow.metrics import TopKAccuracy
from mlbench_core.lr_scheduler.tensorflow.lr import manual_stepping
from mlbench_core.models.tensorflow.resnet_model import Cifar10Model
from mlbench_core.utils import Tracker
def define_graph(inputs, labels, is_training, batch_size, replicas_to_aggregate):
"""
Define graph for synchronized training.
"""
model = Cifar10Model(
resnet_size=20,
data_format='channels_last',
resnet_version=2,
dtype=tf.float32)
logits = model(inputs, is_training)
loss = softmax_cross_entropy_with_logits_v2_l2_regularized(
logits=logits,
labels=labels,
l2=2e-4,
# Exclude BN weights from L2 regularizer
loss_filter_fn=lambda name: 'batch_normalization' not in name)
# Use Top K accuracy as metrics
metrics = [
TopKAccuracy(logits, labels, topk=1),
TopKAccuracy(logits, labels, topk=5),
]
global_step = tf.train.get_or_create_global_step()
# scheduling learning steps.
lr_scheduler = manual_stepping(
global_step=global_step,
boundaries=[32000 // replicas_to_aggregate,
48000 // replicas_to_aggregate],
rates=[0.1, 0.01, 0.001],
warmup=False)
# Define the optimizer
optimizer_ = tf.train.MomentumOptimizer(
learning_rate=lr_scheduler,
momentum=0.9,
use_nesterov=True)
# Wrap optimizer with `SyncReplicasOptimizer`
optimizer = tf.train.SyncReplicasOptimizer(
optimizer_,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=replicas_to_aggregate)
hooks = [
optimizer.make_session_run_hook((rank == 0), num_tokens=0)
]
# The update for batch normalization.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Not all of the processes contribute one update. Some faster procs can push more updates.
grads_and_vars = list(optimizer.compute_gradients(
loss, tf.trainable_variables()))
train_op = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
return train_op, loss, metrics, hooks
def main(is_ps, run_id, rank, world_size, cluster_spec, batch_size,
replicas_to_aggregate, light_target=False):
logging.info("Initial.")
job_name = "ps" if is_ps else "worker"
cluster = tf.train.ClusterSpec(cluster_spec)
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.2)
session_conf = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
server = tf.train.Server(
cluster, job_name=job_name, task_index=rank, config=session_conf)
if is_ps:
server.join()
else:
# Pin variables to parameter server.
device_fn = tf.train.replica_device_setter(
ps_tasks=None,
ps_device="/job:ps",
worker_device="/job:{}/task:{}/device:GPU:{}".format(
job_name, rank, rank),
merge_devices=True,
cluster=cluster,
ps_ops=None,
ps_strategy=None)
with tf.Graph().as_default():
with tf.device(device_fn):
data_loader = DatasetCifar(
dataset='cifar-10',
dataset_root='/datasets',
batch_size=batch_size,
world_size=world_size,
rank=rank,
seed=42,
tf_dtype=tf.float32)
train_op, loss, metrics, hooks = define_graph(
data_loader.inputs,
data_loader.labels,
data_loader.training,
batch_size,
replicas_to_aggregate)
local_init_op = tf.group(
tf.local_variables_initializer(),
data_loader.train_init_op,
data_loader.validation_init_op)
scaffold = tf.train.Scaffold(
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=local_init_op)
lr_tensor_name = tf.get_default_graph().get_tensor_by_name("learning_rate:0")
with tf.train.MonitoredTrainingSession(config=session_conf,
master=server.target,
scaffold=scaffold,
is_chief=(rank == 0),
checkpoint_dir=None,
save_checkpoint_secs=None,
save_summaries_steps=None,
stop_grace_period_secs=5,
hooks=hooks) as sess:
logging.info("Begin training.")
final_epoch = 164
if light_target:
goal = task1_time_to_accuracy_light_goal()
else:
goal = task1_time_to_accuracy_goal()
tracker = Tracker(metrics, run_id, rank, goal=goal)
tracker.start()
for i_epoch in range(final_epoch):
logging.debug("=> Epoch {}".format(i_epoch))
train_round(sess, data_loader.train_init_op, train_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_train,
tracker, lr_tensor=lr_tensor_name,
lr_scheduler_level='epoch')
validation_round(sess, data_loader.validation_init_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_eval,
tracker)
tracker.epoch_end()
if tracker.goal_reached:
print("Goal Reached!")
return
logging.info("Finish.")
def configure_logger(log_dir, is_ps, rank):
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'{:6} rank={} : %(message)s'.format("ps" if is_ps else "worker", rank),
"%Y-%m-%d %H:%M:%S")
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
log_name = '{}-{}.log'.format("ps" if is_ps else "worker", rank)
log_name = os.path.join(log_dir, log_name)
if os.path.exists(log_name):
os.remove(log_name)
fh = logging.FileHandler(log_name)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process run parameters')
parser.add_argument('--run_id', type=str, help='The id of the run')
parser.add_argument('--hosts', type=str, help='The hosts participating in this run')
parser.add_argument('--light', action='store_true', default=False,
help='Train to light target metric goal')
args = parser.parse_args()
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
hosts = args.hosts.split(",")
if len(hosts) < 2:
raise ValueError("At least 2 pods are needed for this benchmark (1 parameter server, 1 worker)")
workers = [h + ":22222" for h in hosts[1:]]
    ps = hosts[0] + ":22222"  # The first host acts as the parameter server
cluster_spec = {"worker": workers,
"ps": [ps]}
# Parse role in the cluster by rank.
is_ps = rank < len(cluster_spec['ps'])
rank = rank if is_ps else rank - len(cluster_spec['ps'])
world_size = size - len(cluster_spec['ps'])
# Configure Logging
if not os.path.exists('/mlbench'):
os.makedirs('/mlbench')
configure_logger('/mlbench', is_ps, rank)
batch_size = 128
replicas_to_aggregate = len(cluster_spec['worker'])
main(is_ps, args.run_id, rank, world_size, cluster_spec,
batch_size, replicas_to_aggregate, light_target=args.light)
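# Hedged launch sketch (the hostnames and run id below are made up): mpirun
# provides OMPI_COMM_WORLD_RANK / OMPI_COMM_WORLD_SIZE, and at least two hosts
# must be passed because the first one becomes the parameter server:
#
#   mpirun -n 3 --allow-run-as-root \
#       python main.py --run_id demo-run --hosts host0,host1,host2 --light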
| 35.200743 | 161 | 0.598479 | [
"Apache-2.0"
] | mlbench/mlbench-benchmarks | tensorflow/imagerecognition/openmpi-cifar10-resnet20-all-reduce/main.py | 9,469 | Python |
from pathlib import Path, PurePath
class Directory:
def __init__(self, path_in_image, path_out_image, path_out_file):
self.__path_in_image = path_in_image
self.__path_out_image = path_out_image
self.__path_out_file = path_out_file
self.__allowed_image_extension = ['.jpg', '.jpeg', '.png']
@property
def get_path_in_image(self):
return self.__path_in_image
@property
def get_path_out_image(self):
return self.__path_out_image
@property
def get_path_out_file(self):
return self.__path_out_file
@property
def get_allowed_image_extension(self):
return self.__allowed_image_extension
def get_images(self):
file_path_list = []
for file in self.get_path_in_image.iterdir():
if file.is_file():
for image_extension in self.get_allowed_image_extension:
if file.suffix == image_extension:
file_path_list.append(PurePath.joinpath(
self.get_path_in_image, file))
return file_path_list
def move_decoded_image(self, path, operation_id):
file_extension = path.suffix
new_file_name = operation_id + file_extension
Path.rename(path, self.get_path_out_image / new_file_name)
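# Minimal usage sketch (not part of the original module); the folder layout under
# /tmp is an assumption for illustration only. The directories are created first
# so get_images() does not fail on a missing input path.
if __name__ == "__main__":
    base = Path("/tmp/tax_receipt_demo")
    for name in ("in", "out", "files"):
        (base / name).mkdir(parents=True, exist_ok=True)
    directory = Directory(
        path_in_image=base / "in",
        path_out_image=base / "out",
        path_out_file=base / "files",
    )
    # Lists any .jpg/.jpeg/.png files dropped into the "in" folder.
    print(directory.get_images())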
| 30.767442 | 72 | 0.663643 | [
"MIT"
] | paaarx/tax_receipt | tax_receipt/directory.py | 1,323 | Python |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import List
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.loggable import Loggable
class BaseTagModel(Loggable):
items = List
# ============= EOF =============================================
| 37.322581 | 81 | 0.543647 | [
"Apache-2.0"
] | ASUPychron/pychron | pychron/pipeline/tagging/base_tags.py | 1,157 | Python |
import time
import types
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
def one(self, a, b):
pass
def two(self):
pass
def three(self, a=None):
pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
d = datetime.now()
class Foo(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertIn(call(1, 2, 3), [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
mock.foo(1, 2 ,3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
def test_call_with_name(self):
self.assertEqual(_Call((), 'foo')[0], 'foo')
self.assertEqual(_Call((('bar', 'barz'),),)[0], '')
self.assertEqual(_Call((('bar', 'barz'), {'hello': 'world'}),)[0], '')
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
mock = create_autospec(SomeClass)
self._check_someclass_mock(mock)
mock = create_autospec(SomeClass())
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different we treat a list instance as the type.
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self, x):
pass
class Bar(object):
def f(self, y):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a(x=5)
this_mock.a.assert_called_with(x=5)
this_mock.a.assert_called_with(5)
self.assertRaises(TypeError, this_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a(5)
instance_mock.a.assert_called_with(5)
instance_mock.a.assert_called_with(x=5)
self.assertRaises(TypeError, instance_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
        # The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f(6)
instance_mock.Bar.f.assert_called_with(6)
instance_mock.Bar.f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f(6)
instance_mock.Bar().f.assert_called_with(6)
instance_mock.Bar().f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
mock.assert_called_with(1, b=2)
mock.assert_called_with(a=1, b=2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
mock.f.assert_called_with(a=3, b=4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock.assert_called_once_with(a=1)
self.assertRaises(AssertionError, mock.assert_called_once_with, 2)
mock(4, 5)
mock.assert_called_with(4, 5)
mock.assert_called_with(a=4, b=5)
self.assertRaises(AssertionError, mock.assert_called_with, a=5, b=4)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self, x, y):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock(1, 2)
mock.assert_called_once_with(1, 2)
mock.assert_called_once_with(x=1, y=2)
self.assertRaises(TypeError, mock, 'a')
instance = mock(1, 2)
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with('a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
instance.assert_called_with(a='a')
mock = create_autospec(Callable(1, 2))
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
a.f(self=10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
def test_autospec_data_descriptor(self):
class Descriptor(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, cls=None):
if obj is None:
return self
return self.value
def __set__(self, obj, value):
pass
class MyProperty(property):
pass
class Foo(object):
__slots__ = ['slot']
@property
def prop(self):
return 3
@MyProperty
def subprop(self):
return 4
desc = Descriptor(42)
foo = create_autospec(Foo)
def check_data_descriptor(mock_attr):
# Data descriptors don't have a spec.
self.assertIsInstance(mock_attr, MagicMock)
mock_attr(1, 2, 3)
mock_attr.abc(4, 5, 6)
mock_attr.assert_called_once_with(1, 2, 3)
mock_attr.abc.assert_called_once_with(4, 5, 6)
# property
check_data_descriptor(foo.prop)
# property subclass
check_data_descriptor(foo.subprop)
# class __slot__
check_data_descriptor(foo.slot)
# plain data descriptor
check_data_descriptor(foo.desc)
def test_autospec_on_bound_builtin_function(self):
meth = types.MethodType(time.ctime, time.time())
self.assertIsInstance(meth(), str)
mocked = create_autospec(meth)
# no signature, so no spec to check against
mocked()
mocked.assert_called_once_with()
mocked.reset_mock()
mocked(4, 5, 6)
mocked.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertIn(kall, mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertIn(calls, mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4)]
self.assertIn(calls, mock.call_args_list)
self.assertNotIn(call('fish'), mock.call_args_list)
self.assertNotIn([call('fish')], mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main()
| 29.372756 | 80 | 0.559354 | [
"Apache-2.0"
] | 4nkitd/pyAutomation | Mark_attandance_py_selenium/py/App/Python/Lib/unittest/test/testmock/testhelpers.py | 27,816 | Python |
# Generated by Django 3.2.4 on 2021-08-06 15:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_alter_tag_tag_name'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='subject',
),
]
| 18.166667 | 47 | 0.590214 | [
"MIT"
] | Gandabh/E-commerce-Site | blog/migrations/0003_remove_comment_subject.py | 327 | Python |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
raise TypeError('the shift to the decoder must be integer. got {}'
                        ' of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not a suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided has different number of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits'
                                  ' indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a'
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
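if __name__ == '__main__':
    # Minimal usage sketch of the interface described in the BinaryCode
    # docstring: a trivial identity code on two modes, where the encoder is
    # the 2 x 2 identity matrix and the decoder just returns the qubit values.
    identity_code = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])
    print(identity_code)
    # Appendage acts on appended vectors, so the result addresses
    # 4 modes with 4 qubits.
    appended = identity_code + identity_code
    print(appended.n_modes, appended.n_qubits)
    # Concatenation requires n_qubits of the inner code to equal n_modes of
    # the outer code, which trivially holds here.
    concatenated = identity_code * identity_code
    print(concatenated.n_modes, concatenated.n_qubits)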
| 38.766234 | 79 | 0.605109 | [
"Apache-2.0"
] | 0tt3r/OpenFermion | src/openfermion/ops/_binary_code.py | 11,940 | Python |
from concurrent.futures import ThreadPoolExecutor, CancelledError
from aiomysql import create_pool
from asyncio import ensure_future, gather, sleep
from pymysql.err import OperationalError
from logging import getLogger
from sanic import Sanic
from sanic.request import Request
from sanic.response import json
from sanic.exceptions import abort
from kiwi.database.DataAccessor import DataAccessor
from kiwi.Recommender import Recommender
from kiwi.config import read_mysql_config, read_config
from kiwi.TransferTypes import create_vote
from kiwi.AsyncContentWrapper import AsyncContentWrapper
from kiwi.ContentEngine import ContentEngine
from kiwi.ActivationCalculator import ActivationCalculator
import time
app = Sanic(__name__)
def create_accessor(context):
return DataAccessor(conn=context)
async def repeated_pool(loop, sleeper, tries):
n = 1
while n <= tries:
try:
return await create_pool(**read_mysql_config()._asdict(),
autocommit=True,
loop=loop,
pool_recycle=600)
except OperationalError as e:
getLogger('root').warn(e)
getLogger('root').warn("Waiting {}s before retry".format(sleeper))
await sleep(sleeper)
n += 1
return await create_pool(**read_mysql_config()._asdict(),
autocommit=True,
loop=loop,
pool_recycle=600)
async def retrain(context, loop):
print("Start training...")
start = time.time()
async with context.pool.acquire() as conn:
accessor = create_accessor(conn)
content_frame = await accessor.get_content_frame()
rating_frame = await accessor.get_vote_frame()
print("Collected data in {}".format(time.time() - start))
algorithm = ContentEngine(
content_frame,
rating_frame)
predictor = AsyncContentWrapper(
loop, context.executor, algorithm)
await predictor.fit()
print("Completed training in {}s".format(time.time() - start))
context.algorithm = algorithm
context.predictor = predictor
@app.listener("before_server_start")
async def setup(context, loop):
context.executor = ThreadPoolExecutor()
context.pool = await repeated_pool(loop, 5, 10)
await retrain(context, loop)
@app.middleware("request")
async def generate_accessor(request):
request['conn'] = await app.pool.acquire()
request['accessor'] = create_accessor(request['conn'])
@app.middleware("response")
async def teardown_accessor(request, response):
await request['conn'].ensure_closed()
app.pool.release(request['conn'])
@app.listener("before_server_stop")
async def teardown(context, loop):
context.run_retrain = False
context.executor.shutdown()
context.pool.close()
await context.pool.wait_closed()
@app.get('/recommendation')
async def recommend(request):
'''
Gets recommendations for user
Expects args in query string form -> user=x&count=n
Returns json object {posts, unvoted, user, meta}
'''
args = request.raw_args
recommender = Recommender(
app.predictor, request['accessor'], read_config())
posts = await recommender.recommend_for(args['user'],
int(args.get('count', 10)))
return json(posts)
@app.post('/feedback')
async def feedback(request: Request):
    '''Stores the feedback for a recommended post. Will return an information object on success and an empty object on failure.
    Think about returning 409-Conflict on failure instead, because the empty object can cause an issue in the engine service.'''
vote = request.json['vote']
config = read_config()
recommender = Recommender(
app.predictor, request['accessor'], config)
try:
vote_result = await recommender.store_feedback(
create_vote(vote, config['positive_cutoff']))
return json(vote_result)
except KeyError:
abort(400, "Unknown user")
@app.post('/content')
async def content(request: Request):
'''
Inserts posts into the database. The request needs the format
{ "posts": [{"id": string, "tags": string}]}.
    Returns the amount of inserted items and 200-OK.
'''
filtered_posts = [(post['id'], post['tags'])
for post in request.json['posts']]
inserted = await request['accessor'].add_content(filtered_posts)
if inserted > 0:
ensure_future(retrain(app, app.loop))
return json({"inserted_count": inserted})
@app.get('/predict')
async def predict(request: Request):
recommender = Recommender(
app.predictor, request['accessor'], read_config())
user = request.raw_args['user']
item = request.raw_args['item']
result = await recommender.predict(user, item)
return json(result)
@app.get('/activation')
async def activation(request: Request):
'''
Returns the activation value for the given set of heuristics
'''
heuristics = request.json['heuristics']
try:
utv = await app.predictor.get_user_taste_vector(heuristics["user"])
except KeyError:
utv = None
ac = ActivationCalculator(heuristics, request['accessor'])
a = await ac.get_activation(utv)
return json({"activation": a, 'received_heuristics': heuristics})
@app.post('/training')
async def training(request: Request):
votes = request.json['votes']
config = read_config()
do_retrain = request.json.get('retrain', False)
inserted_user = await request['accessor'].batch_register_users(
{str(vote[0]) for vote in votes})
inserted = await request['accessor'].insert_votes(
(str(vote[0]), str(vote[1]), 1 if float(vote[2]) > config['positive_cutoff'] else -1) for vote in votes)
if do_retrain:
ensure_future(retrain(app, app.loop))
return json({
'inserted_users': inserted_user,
'inserted_votes': inserted})
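# Example /training payload (a sketch; each vote triple is (user, item, rating)):
#   {"votes": [["alice", "post-1", 0.9], ["bob", "post-1", 0.2]], "retrain": true}
# Ratings above the configured positive_cutoff are stored as +1, all others as
# -1, and "retrain": true schedules a model rebuild in the background.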
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000)
| 33.461538 | 126 | 0.669294 | [
"MIT"
] | bubblegumsoldier/kiwi | kiwi-content/kiwi/app.py | 6,090 | Python |
import click
import subprocess
import os
@click.group()
def cli():
...
@cli.command()
def deploy():
click.echo("Running chalice deploy")
output = subprocess.check_output(f"source {os.environ['VIRTUAL_ENV']}/bin/activate && chalice deploy",shell=True)
click.echo(output)
click.echo(os.environ["VIRTUAL_ENV"])
| 18.5 | 117 | 0.693694 | [
"Apache-2.0"
] | foretheta/whatsup | scripts/whatsup.py | 333 | Python |
# tested
from boa.builtins import take
def Main(amount):
str1 = 'helloworld1234567'
str2 = take(str1, amount)
return str2
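# For example, Main(5) returns 'hello': take(s, n) keeps the first n bytes of s.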
| 10.769231 | 30 | 0.664286 | [
"MIT"
] | CityOfZion/neo-boa | boa_test/example/TakeTest.py | 140 | Python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import pytest
import numpy as np
import pandas as pd
import mindspore.dataset as de
from mindspore import log as logger
import mindspore.dataset.transforms.vision.c_transforms as vision
def test_numpy_slices_list_1():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert data[0] == np_data[i]
def test_numpy_slices_list_2():
logger.info("Test Slicing a 2D list into 1D list.")
np_data = [[1, 2], [3, 4]]
ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i]).all()
def test_numpy_slices_list_3():
logger.info("Test Slicing list in the first dimension.")
np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i]).all()
def test_numpy_slices_list_append():
logger.info("Test reading data of image list.")
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
resize_height, resize_width = 2, 2
data1 = de.TFRecordDataset(DATA_DIR)
resize_op = vision.Resize((resize_height, resize_width))
data1 = data1.map(input_columns=["image"], operations=[vision.Decode(True), resize_op])
res = []
for data in data1.create_dict_iterator():
res.append(data["image"])
ds = de.NumpySlicesDataset(res, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, res[i]).all()
def test_numpy_slices_dict_1():
logger.info("Test Dictionary structure data.")
np_data = {"a": [1, 2], "b": [3, 4]}
ds = de.NumpySlicesDataset(np_data, shuffle=False)
res = [[1, 3], [2, 4]]
for i, data in enumerate(ds):
assert data[0] == res[i][0]
assert data[1] == res[i][1]
def test_numpy_slices_tuple_1():
logger.info("Test slicing a list of tuple.")
np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, np_data[i]).all()
assert sum([1 for _ in ds]) == 3
def test_numpy_slices_tuple_2():
logger.info("Test slicing a tuple of list.")
np_data = ([1, 2], [3, 4], [5, 6])
expected = [[1, 3, 5], [2, 4, 6]]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, expected[i]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_tuple_3():
logger.info("Test reading different dimension of tuple data.")
features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))
data = (features, labels)
ds = de.NumpySlicesDataset(data, column_names=["col1", "col2"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0], features[i]).all()
assert data[1] == labels[i]
def test_numpy_slices_csv_value():
logger.info("Test loading value of csv file.")
csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
df = pd.read_csv(csv_file)
target = df.pop("target")
df.pop("state")
np_data = (df.values, target.values)
ds = de.NumpySlicesDataset(np_data, column_names=["col1", "col2"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(np_data[0][i], data[0]).all()
assert np.equal(np_data[1][i], data[1]).all()
def test_numpy_slices_csv_dict():
logger.info("Test loading csv file as dict.")
csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
df = pd.read_csv(csv_file)
df.pop("state")
res = df.values
ds = de.NumpySlicesDataset(dict(df), shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data, res[i]).all()
def test_numpy_slices_num_samplers():
logger.info("Test num_samplers.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, shuffle=False, num_samples=2)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_distributed_sampler():
logger.info("Test distributed sampler.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, shuffle=False, shard_id=0, num_shards=4)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i * 4]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_distributed_shard_limit():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
num = sys.maxsize
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, num_shards=num, shard_id=0, shuffle=False)
assert "Input num_shards is not within the required interval of (1 to 2147483647)." in str(err.value)
def test_numpy_slices_distributed_zero_shard():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, num_shards=0, shard_id=0, shuffle=False)
assert "Input num_shards is not within the required interval of (1 to 2147483647)." in str(err.value)
def test_numpy_slices_sequential_sampler():
logger.info("Test numpy_slices_dataset with SequentialSampler and repeat.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, sampler=de.SequentialSampler()).repeat(2)
for i, data in enumerate(ds):
assert np.equal(data[0], np_data[i % 8]).all()
def test_numpy_slices_invalid_column_names_type():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(TypeError) as err:
de.NumpySlicesDataset(np_data, column_names=[1], shuffle=False)
assert "Argument column_names[0] with value 1 is not of type (<class 'str'>,)." in str(err.value)
def test_numpy_slices_invalid_column_names_string():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, column_names=[""], shuffle=False)
assert "column_names[0] should not be empty" in str(err.value)
def test_numpy_slices_invalid_empty_column_names():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, column_names=[], shuffle=False)
assert "column_names should not be empty" in str(err.value)
def test_numpy_slices_invalid_empty_data_column():
logger.info("Test incorrect column_names input")
np_data = []
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, shuffle=False)
assert "Argument data cannot be empty" in str(err.value)
if __name__ == "__main__":
test_numpy_slices_list_1()
test_numpy_slices_list_2()
test_numpy_slices_list_3()
test_numpy_slices_list_append()
test_numpy_slices_dict_1()
test_numpy_slices_tuple_1()
test_numpy_slices_tuple_2()
test_numpy_slices_tuple_3()
test_numpy_slices_csv_value()
test_numpy_slices_csv_dict()
test_numpy_slices_num_samplers()
test_numpy_slices_distributed_sampler()
test_numpy_slices_distributed_shard_limit()
test_numpy_slices_distributed_zero_shard()
test_numpy_slices_sequential_sampler()
test_numpy_slices_invalid_column_names_type()
test_numpy_slices_invalid_column_names_string()
test_numpy_slices_invalid_empty_column_names()
test_numpy_slices_invalid_empty_data_column()
| 32.442748 | 105 | 0.675647 | [
"Apache-2.0"
] | Gavin-Hoang/mindspore | tests/ut/python/dataset/test_dataset_numpy_slices.py | 8,500 | Python |
""" Bu kod MQTT den alir FireBase e atar """
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import paho.mqtt.client as mqtt
from time import sleep
import json
import sys
Fb_Coll = "color"
def main():
x = open("../ip.json")
data_ = json.load(x)
ip = data_["ip"]
x.close()
cred = credentials.Certificate("../Login.json")
firebase_admin.initialize_app(cred)
Server = ip
db = firestore.client()
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
client.subscribe("/blue")
print("Topics Subscribed Successfully")
def on_message(client, userdata, msg):
topic = msg.topic
data = msg.payload.decode('UTF-8')
print("Mqtt'den: Topic: {}, Message: {}".format(topic,data))
if topic =="/blue":
db.collection(Fb_Coll).document("color").update({ topic: data })
print("SEND")
sleep(1)
client = mqtt.Client()
client.connect(Server, 1883, 60)
client.on_connect = on_connect
client.on_message = on_message
client.loop_forever()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\nPrograms was stopped")
sys.exit()
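# Sketch of a matching publisher for manual testing (assumes the same broker
# address from ../ip.json and the default MQTT port used above):
#   pub = mqtt.Client()
#   pub.connect(ip, 1883, 60)
#   pub.publish("/blue", "120")
# The published value then lands in Firestore under collection "color",
# document "color", field "/blue".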
| 24.344828 | 77 | 0.580737 | [
"MIT"
] | KJPOUNDY132/mqtt_to_firebase | MyAwsomeMainCode/send_.py | 1,412 | Python |
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for building/saving/loading TensorRT Engine
import sys
import os
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from utils.modeldata import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
from common import HostDeviceMem
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
    This function is similar to the one in ../../common.py, but
    converts network outputs (which are np.float32) appropriately
    before writing them to the Python buffer. This is needed, since
    TensorRT plugins don't support output type description, and
    in our particular case, we use the NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# Current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
# when using lower precision [e.g. NMS output would not be np.float32
# anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1, silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser, trt.Runtime(trt_logger) as runtime:
config.max_workspace_size = 1 << 30
if trt_engine_datatype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
builder.max_batch_size = batch_size
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
def save_engine(engine, engine_dest_path):
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
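# Typical build/save/load flow for the helpers above (a sketch; the UFF and
# engine file paths are placeholders):
#   TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
#   engine = build_engine("model.uff", TRT_LOGGER,
#                         trt_engine_datatype=trt.DataType.HALF, batch_size=1)
#   save_engine(engine, "model.buf")
#   engine = load_engine(trt.Runtime(TRT_LOGGER), "model.buf")
#   inputs, outputs, bindings, stream = allocate_buffers(engine)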
| 37.690909 | 187 | 0.713459 | [
"Apache-2.0"
] | GreyZzzzzzXh/TensorRT | samples/python/uff_ssd/utils/engine.py | 4,146 | Python |
class CouponNotFoundException(Exception):
pass | 25 | 41 | 0.82 | [
"MIT"
] | daniellima/desafio-lojaintegrada | src/app/coupon/coupon_not_found_exception.py | 50 | Python |
from .imports import *
def rainbow_to_vector(r, timeformat='h'):
""" Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
        time (BJD_TDB, hours) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths]
"""
secondformat = ['second', 'seconds', 'sec', 's']
minuteformat = ['minute', 'minutes', 'min', 'm']
hourformat = ['hour', 'hours', 'h']
dayformat = ['day', 'days', 'd']
yearformat = ['year', 'years', 'y']
rflux = r.fluxlike['flux'] # flux (MJy/sr) : [n_wavelengths x n_integrations]
rfluxe = r.fluxlike['uncertainty'] # flux error (MJy/sr) : [n_wavelengths x n_integrations]
rtime = r.timelike['time'] # time (BJD_TDB, hours) : [n_integrations]
rwavel = r.wavelike['wavelength'] # wavelength (microns) : [n_wavelengths]
# change the time array into the requested format (e.g. seconds, minutes, days etc.)
if timeformat in secondformat:
rtime = rtime * 3600
elif timeformat in minuteformat:
rtime = rtime * 60
elif timeformat in hourformat:
# hours is the default time setting
pass
elif timeformat in dayformat:
rtime = rtime / 24.
elif timeformat in yearformat:
rtime = rtime / (24 * 365.)
else:
warnings.warn("Unrecognised Time Format!")
return
return rflux, rfluxe, rtime, rwavel
def rainbow_to_df(r, timeformat='h'):
""" Convert Rainbow object to pandas dataframe
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into pandas df format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
pd.DataFrame
"""
rflux, rfluxe, rtime, rwavel = rainbow_to_vector(r, timeformat)
x, y = np.meshgrid(rtime.to_value(), rwavel.to_value())
rainbow_dict = {f"Time ({timeformat})": x.ravel(), "Wavelength (microns)": y.ravel(), "Flux": rflux.ravel(),
"Flux Error": rfluxe.ravel()}
df = pd.DataFrame(rainbow_dict)
return df
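# Sketch of the two converters above on a hypothetical Rainbow object `r`
# (`r` stands in for a real chromatic light curve):
#   flux, flux_err, time, wavelength = rainbow_to_vector(r, timeformat='d')
#   df = rainbow_to_df(r, timeformat='d')
#   df.head()  # columns: "Time (d)", "Wavelength (microns)", "Flux", "Flux Error"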
def bin_data(jd, y, mins_jd):
t = np.array(jd)
split = []
sorted_t = t
sorted_y = y
start = sorted_t[0]
nextbin = sorted_t[np.absolute(sorted_t - start) > mins_jd]
while nextbin != []:
start = start + mins_jd
ind_st = np.argmax(sorted_t > start)
if len(split) > 0:
if ind_st != split[-1]:
split.append(ind_st)
time = sorted_t[ind_st:]
# need to add defn for time here?
else:
split.append(ind_st)
time = sorted_t[ind_st:]
nextbin = time[np.absolute(time - start) > mins_jd]
times = np.split(sorted_t, split)
ys = np.split(sorted_y, split)
bins = np.zeros(len(times))
binned_y = np.zeros(len(times))
binned_err = np.zeros(len(times))
for i in range(len(times)):
if len(ys[i]) > 0:
try:
bins[i] = np.nanmean(times[i])
binned_y[i] = np.nanmean(ys[i])
n = len(times[i])
# standard error in the median:
# binned_err[i] = 1.253 * np.nanstd(ys[i]) / np.sqrt(n)
binned_err[i] = np.nanstd(ys[i]) / np.sqrt(n)
except Exception as e:
print(e)
pass
bin_t = bins[binned_y != 0]
bin_e = binned_err[binned_y != 0]
bin_y = binned_y[binned_y != 0]
return bin_t, bin_y, bin_e
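# Sketch of bin_data on synthetic values (the arrays are illustrative; mins_jd
# is given in the same units as jd, here 0.1 of a day):
#   times = np.linspace(0.0, 1.0, 100)
#   fluxes = 1.0 + 0.01 * np.random.randn(100)
#   bin_t, bin_y, bin_e = bin_data(times, fluxes, 0.1)
# bin_t and bin_y hold the mean time and flux per window, bin_e the standard
# error of the mean within each window.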
def find_nearest(array, value):
# array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def remove_nans(arr_with_nans, *otherarrs):
nanfree_arrs = []
for arr in otherarrs:
nanfree_arrs.append(arr[~np.isnan(arr_with_nans)])
arr_without_nans = arr_with_nans[~np.isnan(arr_with_nans)]
return arr_without_nans, nanfree_arrs | 32.910448 | 112 | 0.578005 | [
"MIT"
] | catrionamurray/chromatic_fitting | src/utils.py | 4,410 | Python |
"""
This test will initialize the display using displayio
and draw a solid red background
"""
import board
import displayio
from adafruit_st7735r import ST7735R
spi = board.SPI()
tft_cs = board.D5
tft_dc = board.D6
displayio.release_displays()
display_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs, reset=board.D9)
display = ST7735R(display_bus, width=128, height=160, bgr=True)
# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)
color_bitmap = displayio.Bitmap(128, 160, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFF0000
bg_sprite = displayio.TileGrid(color_bitmap,
pixel_shader=color_palette,
x=0, y=0)
splash.append(bg_sprite)
while True:
pass
| 24.294118 | 90 | 0.690073 | [
"MIT"
] | jadudm/feather-isa | infra/libs-400rc2-20190512/examples/st7735r_128x160_simpletest.py | 826 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
import sys
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, False, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
# txindex is enabled by default in Dash and needs to be disabled for import-rescan.py
extra_args[i] += ["-prune=1", "-txindex=0", "-reindex"]
self.add_nodes(self.num_nodes, extra_args, stderr=sys.stdout)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
self.sync_blocks()
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_blocks()
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| 47.631579 | 116 | 0.654365 | [
"MIT"
] | BlueScionic/vivarium | test/functional/import-rescan.py | 9,050 | Python |
shisu_risto = [1, 0, 2]
kazu_risto = [1, 4, 6]
tou_risto = []
for ai in range(len(shisu_risto)):
tou_risto.append(kazu_risto[shisu_risto[ai]])
print(tou_risto)
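# prints [4, 1, 6]: kazu_risto reordered by the indices in shisu_risto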
| 16.7 | 49 | 0.682635 | [
"MIT"
] | ALFA-group/neural-program-comprehension | stimuli/Python/one_file_per_item/jap/33_# math_for 15.py | 167 | Python |
#
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
"""Test case for RootController /"""
import json
from conductor import version
from conductor.tests.unit.api import base_api
class TestRoot(base_api.BaseApiTest):
def test_get_index(self):
actual_response = self.app.get('/')
req_json_file = './conductor/tests/unit/api/controller/versions.json'
expected_response = json.loads(open(req_json_file).read())
versions = expected_response.get('versions')
for version_obj in versions:
version_obj['version'] = "of-has:{}".format(version.version_info.version_string())
self.assertEqual(200, actual_response.status_int)
self.assertJsonEqual(expected_response,
json.loads(actual_response.body.decode()))
| 38.4 | 94 | 0.639323 | [
"Apache-2.0"
] | onap/optf-has | conductor/conductor/tests/unit/api/controller/test_root.py | 1,536 | Python |
from apollo.viewsets import UserViewSet
from applications.assets.viewsets import EquipmentViewSet, ServiceViewSet
from applications.business.viewsets import BusinessViewSet, BusinessMembershipViewSet
from applications.charge_list.viewsets import ChargeListViewSet, ActivityChargeViewSet, \
ActivityChargeActivityCountViewSet, TimeChargeViewSet, UnitChargeViewSet
from applications.price_list.viewsets import PriceListViewSet, ActivityPriceListItemViewSet, TimePriceListItemViewSet, \
UnitPriceListItemViewSet, PriceListItemEquipmentViewSet, PriceListItemServiceViewSet
from applications.station.viewsets import StationViewSet, StationBusinessViewSet, StationRentalViewSet
from applications.terms_of_service.viewsets import TermsOfServiceViewSet
from rest_framework.routers import DefaultRouter
# Internal API Definition
router = DefaultRouter()
router.register(r'account/user', UserViewSet, base_name='user')
router.register(r'account/terms_of_service', TermsOfServiceViewSet, base_name='terms-of-service')
router.register(r'business/business', BusinessViewSet, base_name='business')
router.register(r'business/business_membership', BusinessMembershipViewSet, base_name='business-membership')
router.register(r'equipment/equipment', EquipmentViewSet, base_name='equipment')
router.register(r'equipment/service', ServiceViewSet, base_name='service')
router.register(r'station/station', StationViewSet, base_name='station')
router.register(r'station/station_business', StationBusinessViewSet, base_name='station-business')
router.register(r'station/station_rental', StationRentalViewSet, base_name='station-rental')
router.register(r'price_list/price_list', PriceListViewSet, base_name='price-list')
router.register(r'price_list/activity_item', ActivityPriceListItemViewSet, base_name='activity-price-list-item')
router.register(r'price_list/time_item', TimePriceListItemViewSet, base_name='time-price-list-item')
router.register(r'price_list/unit_item', UnitPriceListItemViewSet, base_name='unit-price-list-item')
router.register(r'price_list/equipment_relation', PriceListItemEquipmentViewSet, base_name='price-list-item-equipment')
router.register(r'price_list/service_relation', PriceListItemServiceViewSet, base_name='price-list-item-service')
router.register(r'charge_list/charge_list', ChargeListViewSet, base_name='charge-list')
router.register(r'charge_list/activity_charge', ActivityChargeViewSet, base_name='activity-charge')
router.register(r'charge_list/activity_charge_activity_count', ActivityChargeActivityCountViewSet,
base_name='activity-charge-activity-count')
router.register(r'charge_list/time_charge', TimeChargeViewSet, base_name='time-charge')
router.register(r'charge_list/unit_charge', UnitChargeViewSet, base_name='unit-charge')
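# With the registrations above, DefaultRouter exposes the usual list/detail
# routes for every viewset, e.g. for the first entry (a sketch):
#   /account/user/        -> name "user-list"
#   /account/user/{pk}/   -> name "user-detail"
# The router's URLs are then typically included from the project's urls.py,
# e.g. via url(r'^api/', include(router.urls)) or an equivalent path() entry.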
| 75.162162 | 120 | 0.848256 | [
"MIT"
] | awwong1/apollo | apollo/router.py | 2,781 | Python |
from whey.mixin import BuilderMixin
class GettextMixin:
def build_messages(self: BuilderMixin):
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
locales = self.pkgdir / "locales"
if self.verbose:
print(" Building messages")
for po in locales.glob("*/LC_MESSAGES/pm2hw.po"):
with po.open("rt", encoding="UTF-8") as f:
catalog = read_po(f, po.parts[-3], po.stem)
mo = self.build_dir / po.relative_to(self.project_dir).with_suffix(".mo")
mo.parent.maybe_make(parents=True)
with mo.open("wb") as f:
write_mo(f, catalog)
self.report_written(mo)
if self.verbose:
print(" Wrote language file:", mo)
# class SDistBuilder(GettextMixin, builder.SDistBuilder):
# def call_additional_hooks(self):
# self.build_messages()
# class WheelBuilder(GettextMixin, builder.WheelBuilder):
# def call_additional_hooks(self):
# self.build_messages()
| 27.205882 | 76 | 0.718919 | [
"MPL-2.0"
] | bedwardly-down/pm2hw | build_hooks.py | 925 | Python |
# coding: utf-8
"""
"""
from copy import deepcopy
import datetime
import io
import json
import math
import os
import zipfile
import flask
import flask_login
import itsdangerous
import werkzeug.utils
from flask_babel import _
from . import frontend
from .. import logic
from .. import models
from .. import db
from ..logic import user_log, object_log, comments, object_sorting
from ..logic.actions import get_action, get_actions, get_action_type, get_action_types
from ..logic.action_type_translations import get_action_types_with_translations_in_language, \
get_action_type_with_translation_in_language
from ..logic.action_translations import get_action_with_translation_in_language
from ..logic.action_permissions import get_user_action_permissions, get_sorted_actions_for_user
from ..logic.object_permissions import Permissions, get_user_object_permissions, object_is_public, get_object_permissions_for_users, set_object_public, set_user_object_permissions, set_group_object_permissions, set_project_object_permissions, get_objects_with_permissions, get_object_info_with_permissions, get_object_permissions_for_groups, get_object_permissions_for_projects, request_object_permissions
from ..logic.datatypes import JSONEncoder
from ..logic.instrument_translations import get_instrument_with_translation_in_language
from ..logic.users import get_user, get_users, get_users_by_name
from ..logic.schemas import validate, generate_placeholder
from ..logic.settings import get_user_settings, set_user_settings
from ..logic.object_search import generate_filter_func, wrap_filter_func
from ..logic.groups import get_group, get_user_groups
from ..logic.objects import create_object, create_object_batch, update_object, get_object, get_object_versions
from ..logic.object_log import ObjectLogEntryType
from ..logic.projects import get_project, get_user_projects, get_user_project_permissions
from ..logic.locations import get_location, get_object_ids_at_location, get_object_location_assignment, get_object_location_assignments, get_locations, assign_location_to_object, get_locations_tree
from ..logic.languages import get_language_by_lang_code, get_language, get_languages, Language, get_user_language
from ..logic.files import FileLogEntryType
from ..logic.errors import GroupDoesNotExistError, ObjectDoesNotExistError, UserDoesNotExistError, ActionDoesNotExistError, ValidationError, ProjectDoesNotExistError, LocationDoesNotExistError, ActionTypeDoesNotExistError
from ..logic.notebook_templates import get_notebook_templates
from .objects_forms import ObjectPermissionsForm, ObjectForm, ObjectVersionRestoreForm, ObjectUserPermissionsForm, CommentForm, ObjectGroupPermissionsForm, ObjectProjectPermissionsForm, FileForm, FileInformationForm, FileHidingForm, ObjectLocationAssignmentForm, ExternalLinkForm, ObjectPublicationForm, CopyPermissionsForm
from ..utils import object_permissions_required
from .utils import jinja_filter, generate_qrcode
from .object_form_parser import parse_form_data
from .labels import create_labels, PAGE_SIZES, DEFAULT_PAPER_FORMAT, HORIZONTAL_LABEL_MARGIN, VERTICAL_LABEL_MARGIN, mm
from . import pdfexport
from .utils import check_current_user_is_not_readonly
from ..logic.utils import get_translated_text
__author__ = 'Florian Rhiem <[email protected]>'
def on_unauthorized(object_id):
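    """Render the 403 page for an object, noting whether any user holds GRANT permission and could be asked for access."""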
permissions_by_user = get_object_permissions_for_users(object_id)
has_grant_user = any(
Permissions.GRANT in permissions
for permissions in permissions_by_user.values()
)
return flask.render_template('objects/unauthorized.html', object_id=object_id, has_grant_user=has_grant_user), 403
@frontend.route('/objects/')
@flask_login.login_required
def objects():
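    """
    Show a list of objects readable by the current user.

    Query parameters (all optional): ids, q, advanced, action, t (action type),
    location, user, user_permissions, doi, project, project_permissions, group,
    group_permissions, display_properties, limit, offset, sortby and order.
    """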
object_ids = flask.request.args.get('ids', '')
objects = []
display_properties = []
display_property_titles = {}
user_language_id = logic.languages.get_user_language(flask_login.current_user).id
if 'display_properties' in flask.request.args:
for property_info in flask.request.args.get('display_properties', '').split(','):
if ':' in property_info:
property_name, property_title = property_info.split(':', 1)
else:
property_name, property_title = property_info, property_info
if property_name not in display_properties:
display_properties.append(property_name)
display_property_titles[property_name] = property_title
name_only = True
if object_ids:
object_ids = object_ids.split(',')
try:
object_ids = [int(object_id) for object_id in object_ids]
except ValueError:
object_ids = []
readable_object_ids = []
for object_id in object_ids:
if Permissions.READ in get_user_object_permissions(object_id, user_id=flask_login.current_user.id):
readable_object_ids.append(object_id)
object_ids = readable_object_ids
for object_id in object_ids:
try:
objects.append(get_object(object_id))
except logic.errors.ObjectDoesNotExistError:
pass
action_id = None
action = None
action_type = None
project_id = None
location_id = None
location = None
user = None
user_id = None
doi = None
object_ids_at_location = None
project = None
group = None
group_id = None
query_string = ''
use_advanced_search = False
must_use_advanced_search = False
advanced_search_had_error = False
search_notes = []
search_tree = None
limit = None
offset = None
pagination_enabled = True
num_objects_found = len(objects)
sorting_property_name = None
sorting_order_name = None
else:
pagination_enabled = True
try:
user_id = int(flask.request.args.get('user', ''))
user = get_user(user_id)
except ValueError:
user_id = None
user = None
except UserDoesNotExistError:
user_id = None
user = None
if user_id is not None:
user_permissions = {
'read': Permissions.READ,
'write': Permissions.WRITE,
'grant': Permissions.GRANT
}.get(flask.request.args.get('user_permissions', '').lower())
else:
user_permissions = None
try:
doi = logic.publications.simplify_doi(flask.request.args.get('doi', ''))
except logic.errors.InvalidDOIError:
doi = None
try:
location_id = int(flask.request.args.get('location', ''))
location = get_location(location_id)
object_ids_at_location = get_object_ids_at_location(location_id)
except ValueError:
location_id = None
location = None
object_ids_at_location = None
except LocationDoesNotExistError:
location_id = None
location = None
object_ids_at_location = None
try:
action_id = int(flask.request.args.get('action', ''))
except ValueError:
action_id = None
if action_id is not None:
action = get_action_with_translation_in_language(action_id, user_language_id, use_fallback=True)
action_type = get_action_type_with_translation_in_language(action.type_id, user_language_id)
action_schema = action.schema
if not display_properties:
display_properties = action_schema.get('displayProperties', [])
for property_name in display_properties:
display_property_titles[property_name] = action_schema['properties'][property_name]['title']
else:
action = None
action_type_id = flask.request.args.get('t', '')
if action_type_id is not None:
try:
action_type_id = int(action_type_id)
except ValueError:
# ensure old links still function
action_type_id = {
'samples': models.ActionType.SAMPLE_CREATION,
'measurements': models.ActionType.MEASUREMENT,
'simulations': models.ActionType.SIMULATION
}.get(action_type_id, None)
if action_type_id is not None:
try:
action_type = get_action_type_with_translation_in_language(
action_type_id=action_type_id,
language_id=user_language_id
)
except ActionTypeDoesNotExistError:
action_type = None
else:
action_type = None
project_permissions = None
if display_properties:
name_only = False
try:
project_id = int(flask.request.args.get('project', ''))
except ValueError:
project_id = None
if project_id is not None:
if Permissions.READ not in get_user_project_permissions(project_id=project_id, user_id=flask_login.current_user.id, include_groups=True):
return flask.abort(403)
project = get_project(project_id)
project_permissions = {
'read': Permissions.READ,
'write': Permissions.WRITE,
'grant': Permissions.GRANT
}.get(flask.request.args.get('project_permissions', '').lower())
else:
project = None
group_permissions = None
try:
group_id = int(flask.request.args.get('group', ''))
except ValueError:
group_id = None
if group_id is not None:
try:
group = logic.groups.get_group(group_id)
group_member_ids = logic.groups.get_group_member_ids(group_id)
except logic.errors.GroupDoesNotExistError:
group = None
else:
if flask_login.current_user.id not in group_member_ids:
return flask.abort(403)
else:
group = None
if group is not None:
group_permissions = {
'read': Permissions.READ,
'write': Permissions.WRITE,
'grant': Permissions.GRANT
}.get(flask.request.args.get('group_permissions', '').lower())
else:
group_permissions = None
if flask.request.args.get('limit', '') == 'all':
limit = None
else:
try:
limit = int(flask.request.args.get('limit', ''))
except ValueError:
limit = None
else:
if limit <= 0:
limit = None
elif limit >= 1000:
limit = 1000
# default objects per page
if limit is None:
limit = get_user_settings(flask_login.current_user.id)['OBJECTS_PER_PAGE']
else:
set_user_settings(flask_login.current_user.id, {'OBJECTS_PER_PAGE': limit})
try:
offset = int(flask.request.args.get('offset', ''))
except ValueError:
offset = None
else:
if offset < 0:
offset = None
elif offset > 100000000:
offset = 100000000
if limit is not None and offset is None:
offset = 0
sorting_order_name = flask.request.args.get('order', None)
if sorting_order_name == 'asc':
sorting_order = object_sorting.ascending
elif sorting_order_name == 'desc':
sorting_order = object_sorting.descending
else:
sorting_order = None
sorting_property_name = flask.request.args.get('sortby', None)
if sorting_order is None:
if sorting_property_name is None:
sorting_order_name = 'desc'
sorting_order = object_sorting.descending
else:
sorting_order_name = 'asc'
sorting_order = object_sorting.ascending
if sorting_property_name is None:
sorting_property_name = '_object_id'
else:
name_only = False
if sorting_property_name == '_object_id':
sorting_property = object_sorting.object_id()
elif sorting_property_name == '_creation_date':
sorting_property = object_sorting.creation_date()
elif sorting_property_name == '_last_modification_date':
sorting_property = object_sorting.last_modification_date()
else:
sorting_property = object_sorting.property_value(sorting_property_name)
sorting_function = sorting_order(sorting_property)
query_string = flask.request.args.get('q', '')
if query_string:
name_only = False
search_tree = None
use_advanced_search = flask.request.args.get('advanced', None) is not None
must_use_advanced_search = use_advanced_search
advanced_search_had_error = False
additional_search_notes = []
if not use_advanced_search and query_string:
if user_id is None:
users = get_users_by_name(query_string)
if len(users) == 1:
user = users[0]
user_id = user.id
query_string = ''
elif len(users) > 1:
additional_search_notes.append(('error', "There are multiple users with this name.", 0, 0))
if doi is None and query_string.startswith('doi:'):
try:
doi = logic.publications.simplify_doi(query_string)
query_string = ''
except logic.errors.InvalidDOIError:
pass
try:
filter_func, search_tree, use_advanced_search = generate_filter_func(query_string, use_advanced_search)
except Exception:
# TODO: ensure that advanced search does not cause exceptions
if use_advanced_search:
advanced_search_had_error = True
def filter_func(data, search_notes):
""" Return all objects"""
search_notes.append(('error', "Unable to parse search expression", 0, len(query_string)))
return False
else:
raise
filter_func, search_notes = wrap_filter_func(filter_func)
search_notes.extend(additional_search_notes)
if user_id is None or user_permissions is not None:
object_ids_for_user = None
else:
object_ids_for_user = user_log.get_user_related_object_ids(user_id)
if doi is None:
object_ids_for_doi = None
else:
object_ids_for_doi = logic.publications.get_object_ids_linked_to_doi(doi)
if use_advanced_search and not must_use_advanced_search:
search_notes.append(('info', _("The advanced search was used automatically. Search for \"%(query_string)s\" to use the simple search.", query_string=query_string), 0, 0))
try:
object_ids = None
if object_ids_at_location is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_at_location)
if object_ids_for_user is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_for_user)
if object_ids_for_doi is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_for_doi)
if object_ids is not None:
pagination_enabled = False
limit = None
offset = None
if object_ids is not None and not object_ids:
objects = []
num_objects_found = 0
else:
num_objects_found_list = []
objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ,
filter_func=filter_func,
sorting_func=sorting_function,
limit=limit,
offset=offset,
action_id=action_id,
action_type_id=action_type.id if action_type is not None else None,
other_user_id=user_id,
other_user_permissions=user_permissions,
project_id=project_id,
project_permissions=project_permissions,
group_id=group_id,
group_permissions=group_permissions,
object_ids=object_ids,
num_objects_found=num_objects_found_list,
name_only=name_only
)
num_objects_found = num_objects_found_list[0]
except Exception as e:
search_notes.append(('error', "Error during search: {}".format(e), 0, 0))
objects = []
num_objects_found = 0
if any(note[0] == 'error' for note in search_notes):
objects = []
advanced_search_had_error = True
cached_actions = {}
cached_users = {}
for i, obj in enumerate(objects):
if obj.version_id == 0:
original_object = obj
else:
original_object = get_object(object_id=obj.object_id, version_id=0)
if obj.action_id not in cached_actions:
cached_actions[obj.action_id] = get_action(obj.action_id)
if obj.user_id not in cached_users:
cached_users[obj.user_id] = get_user(obj.user_id)
if original_object.user_id not in cached_users:
cached_users[original_object.user_id] = get_user(original_object.user_id)
objects[i] = {
'object_id': obj.object_id,
'created_by': cached_users[original_object.user_id],
'created_at': original_object.utc_datetime,
'modified_by': cached_users[obj.user_id],
'last_modified_at': obj.utc_datetime,
'data': obj.data,
'schema': obj.schema,
'action': cached_actions[obj.action_id],
'display_properties': {}
}
for property_name in display_properties:
if property_name not in objects[i]['data'] or '_type' not in objects[i]['data'][property_name] or property_name not in objects[i]['schema']['properties']:
objects[i]['display_properties'][property_name] = None
continue
objects[i]['display_properties'][property_name] = (objects[i]['data'][property_name], objects[i]['schema']['properties'][property_name])
if action_id is None:
show_action = True
else:
show_action = False
def build_modified_url(**kwargs):
return flask.url_for(
'.objects',
**{k: v for k, v in flask.request.args.items() if k not in kwargs},
**kwargs
)
action_ids = {
object['action'].id for object in objects
}
action_translations = {}
for id in action_ids:
action_translations[id] = logic.action_translations.get_action_translation_for_action_in_language(
action_id=id,
language_id=user_language_id,
use_fallback=True
)
return flask.render_template(
'objects/objects.html',
objects=objects,
display_properties=display_properties,
display_property_titles=display_property_titles,
search_query=query_string,
action=action,
action_translations=action_translations,
action_id=action_id,
action_type=action_type,
project=project,
project_id=project_id,
group=group,
group_id=group_id,
location_id=location_id,
location=location,
user_id=user_id,
user=user,
doi=doi,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
get_instrument_translation_for_instrument_in_language=logic.instrument_translations.get_instrument_translation_for_instrument_in_language,
build_modified_url=build_modified_url,
sorting_property=sorting_property_name,
sorting_order=sorting_order_name,
limit=limit,
offset=offset,
pagination_enabled=pagination_enabled,
num_objects_found=num_objects_found,
show_action=show_action,
use_advanced_search=use_advanced_search,
must_use_advanced_search=must_use_advanced_search,
advanced_search_had_error=advanced_search_had_error,
search_notes=search_notes,
search_tree=search_tree
)
@jinja_filter
def to_datatype(obj):
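    """Jinja filter: convert JSON-compatible data back into its SampleDB datatypes via JSONEncoder.object_hook."""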
return json.loads(json.dumps(obj), object_hook=JSONEncoder.object_hook)
def get_sub_data_and_schema(data, schema, id_prefix):
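    """
    Walk data and schema along the '__'-separated id_prefix, creating
    placeholder entries where necessary, and return the referenced array and
    its schema. Raises ValueError if the path is invalid or does not end at
    an array.
    """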
sub_data = data
sub_schema = schema
try:
for key in id_prefix.split('__'):
if sub_schema['type'] == 'array':
key = int(key)
sub_schema = sub_schema['items']
elif sub_schema['type'] == 'object':
sub_schema = sub_schema['properties'][key]
else:
raise ValueError('invalid type')
if isinstance(key, int):
while key >= len(sub_data):
sub_data.append(generate_placeholder(sub_schema))
elif key not in sub_data:
sub_data[key] = generate_placeholder(sub_schema)
sub_data = sub_data[key]
if sub_schema['type'] != 'array':
raise ValueError('invalid type')
except (ValueError, KeyError, IndexError, TypeError):
# TODO: error handling/logging?
raise ValueError('invalid action')
return sub_data, sub_schema
def apply_action_to_form_data(action, form_data):
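    """Re-index the form fields of an array after the item named by a delete action has been removed; other actions leave the form data unchanged."""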
new_form_data = form_data
action_id_prefix, action_index, action_type = action[len('action_'):].rsplit('__', 2)
if action_type == 'delete':
deleted_item_index = int(action_index)
parent_id_prefix = action_id_prefix
new_form_data = {}
for name in form_data:
if not name.startswith(parent_id_prefix + '__'):
new_form_data[name] = form_data[name]
else:
item_index, id_suffix = name[len(parent_id_prefix) + 2:].split('__', 1)
item_index = int(item_index)
if item_index < deleted_item_index:
new_form_data[name] = form_data[name]
if item_index > deleted_item_index:
new_name = parent_id_prefix + '__' + str(item_index - 1) + '__' + id_suffix
new_form_data[new_name] = form_data[name]
return new_form_data
def apply_action_to_data(action, data, schema):
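    """Apply an add, delete, addcolumn or deletecolumn form action to the object data in place, respecting the minItems/maxItems limits from the schema."""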
action_id_prefix, action_index, action_type = action[len('action_'):].rsplit('__', 2)
if action_type not in ('add', 'delete', 'addcolumn', 'deletecolumn'):
raise ValueError('invalid action')
sub_data, sub_schema = get_sub_data_and_schema(data, schema, action_id_prefix.split('__', 1)[1])
if action_type in ('addcolumn', 'deletecolumn') and (sub_schema["style"] != "table" or sub_schema["items"]["type"] != "array"):
raise ValueError('invalid action')
num_existing_items = len(sub_data)
if action_type == 'add':
if 'maxItems' not in sub_schema or num_existing_items < sub_schema["maxItems"]:
sub_data.append(generate_placeholder(sub_schema["items"]))
if isinstance(sub_data[-1], list) and sub_schema.get('style') == 'table':
num_existing_columns = sub_schema["items"].get("minItems", 0)
for row in sub_data:
num_existing_columns = max(num_existing_columns, len(row))
while len(sub_data[-1]) < num_existing_columns:
sub_data[-1].append(None)
elif action_type == 'delete':
action_index = int(action_index)
if ('minItems' not in sub_schema or num_existing_items > sub_schema["minItems"]) and action_index < num_existing_items:
del sub_data[action_index]
else:
num_existing_columns = sub_schema["items"].get("minItems", 0)
for row in sub_data:
num_existing_columns = max(num_existing_columns, len(row))
if action_type == 'addcolumn':
if 'maxItems' not in sub_schema["items"] or num_existing_columns < sub_schema["items"]["maxItems"]:
num_existing_columns += 1
for row in sub_data:
while len(row) < num_existing_columns:
row.append(generate_placeholder(sub_schema["items"]["items"]))
elif action_type == 'deletecolumn':
            if num_existing_columns > sub_schema["items"].get("minItems", 0):
num_existing_columns -= 1
for row in sub_data:
while len(row) > num_existing_columns:
del row[-1]
def show_object_form(object, action, previous_object=None, should_upgrade_schema=False, placeholder_data=None):
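    """
    Render and process the object form for creating or editing an object.

    Handles placeholder and copied data, schema upgrades, array add/delete
    actions, batch creation, permission presets (copying permissions or
    granting them to a basic or project group) and optional instrument log
    entries.
    """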
if object is None and previous_object is None:
data = generate_placeholder(action.schema)
if placeholder_data:
for path, value in placeholder_data.items():
try:
sub_data = data
for step in path[:-1]:
sub_data = sub_data[step]
sub_data[path[-1]] = value
except Exception:
# Ignore invalid placeholder data
pass
elif object is None and previous_object is not None:
data = logic.schemas.copy_data(previous_object.data, previous_object.schema)
else:
data = object.data
previous_object_schema = None
mode = 'edit'
if should_upgrade_schema:
mode = 'upgrade'
assert object is not None
schema = action.schema
data, upgrade_warnings = logic.schemas.convert_to_schema(object.data, object.schema, action.schema)
for upgrade_warning in upgrade_warnings:
flask.flash(upgrade_warning, 'warning')
elif object is not None:
schema = object.schema
elif previous_object is not None:
schema = previous_object.schema
previous_object_schema = schema
else:
schema = action.schema
if action is not None and action.instrument is not None and flask_login.current_user in action.instrument.responsible_users:
may_create_log_entry = True
create_log_entry_default = action.instrument.create_log_entry_default
instrument_log_categories = logic.instrument_log_entries.get_instrument_log_categories(action.instrument.id)
if 'create_instrument_log_entry' in flask.request.form:
category_ids = []
for category_id in flask.request.form.getlist('instrument_log_categories'):
try:
if int(category_id) in [category.id for category in instrument_log_categories]:
category_ids.append(int(category_id))
except Exception:
pass
else:
category_ids = None
else:
instrument_log_categories = None
category_ids = None
create_log_entry_default = None
may_create_log_entry = False
permissions_for_group_id = None
permissions_for_project_id = None
copy_permissions_object_id = None
if object is None:
if flask.request.form.get('permissions_method') == 'copy_permissions' and flask.request.form.get('copy_permissions_object_id'):
copy_permissions_object_id = flask.request.form.get('copy_permissions_object_id')
try:
copy_permissions_object_id = int(copy_permissions_object_id)
if Permissions.READ not in get_user_object_permissions(copy_permissions_object_id, flask_login.current_user.id):
flask.flash(_("Unable to copy permissions. Default permissions will be applied."), 'error')
copy_permissions_object_id = None
except Exception:
flask.flash(_("Unable to copy permissions. Default permissions will be applied."), 'error')
copy_permissions_object_id = None
elif flask.request.form.get('permissions_method') == 'permissions_for_group' and flask.request.form.get('permissions_for_group_group_id'):
permissions_for_group_id = flask.request.form.get('permissions_for_group_group_id')
try:
permissions_for_group_id = int(permissions_for_group_id)
if flask_login.current_user.id not in logic.groups.get_group_member_ids(permissions_for_group_id):
flask.flash(_("Unable to grant permissions to basic group. Default permissions will be applied."), 'error')
permissions_for_group_id = None
except Exception:
flask.flash(_("Unable to grant permissions to basic group. Default permissions will be applied."), 'error')
permissions_for_group_id = None
elif flask.request.form.get('permissions_method') == 'permissions_for_project' and flask.request.form.get('permissions_for_project_project_id'):
permissions_for_project_id = flask.request.form.get('permissions_for_project_project_id')
try:
permissions_for_project_id = int(permissions_for_project_id)
if flask_login.current_user.id not in logic.projects.get_project_member_user_ids_and_permissions(permissions_for_project_id, include_groups=True):
flask.flash(_("Unable to grant permissions to project group. Default permissions will be applied."), 'error')
permissions_for_project_id = None
except Exception:
flask.flash(_("Unable to grant permissions to project group. Default permissions will be applied."), 'error')
permissions_for_project_id = None
if previous_object is not None:
action_id = previous_object.action_id
previous_object_id = previous_object.id
has_grant_for_previous_object = Permissions.GRANT in get_user_object_permissions(user_id=flask_login.current_user.id, object_id=previous_object_id)
else:
action_id = action.id
previous_object_id = None
has_grant_for_previous_object = False
errors = []
object_errors = {}
form_data = {}
previous_actions = []
serializer = itsdangerous.URLSafeSerializer(flask.current_app.config['SECRET_KEY'])
form = ObjectForm()
if flask.request.method != 'GET' and form.validate_on_submit():
raw_form_data = {key: flask.request.form.getlist(key) for key in flask.request.form}
form_data = {k: v[0] for k, v in raw_form_data.items()}
if 'input_num_batch_objects' in form_data:
try:
num_objects_in_batch = int(form_data['input_num_batch_objects'])
except ValueError:
try:
                    # The form allows notations like '1.2e1' for '12', which
                    # int() rejects, so fall back to parsing them as floats
num_objects_in_batch = float(form_data['input_num_batch_objects'])
if num_objects_in_batch == int(num_objects_in_batch):
num_objects_in_batch = int(num_objects_in_batch)
else:
raise
except ValueError:
errors.append('input_num_batch_objects')
num_objects_in_batch = None
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
else:
num_objects_in_batch = None
if 'previous_actions' in flask.request.form:
try:
previous_actions = serializer.loads(flask.request.form['previous_actions'])
except itsdangerous.BadData:
flask.abort(400)
if "action_submit" in form_data:
# The object name might need the batch number to match the pattern
if schema.get('batch', False) and num_objects_in_batch is not None:
name_suffix_format = schema.get('batch_name_format', '{:d}')
try:
name_suffix_format.format(1)
except (ValueError, KeyError):
name_suffix_format = '{:d}'
if name_suffix_format:
example_name_suffix = name_suffix_format.format(1)
else:
example_name_suffix = ''
if 'object__name__text' in form_data:
batch_base_name = form_data['object__name__text']
raw_form_data['object__name__text'] = [batch_base_name + example_name_suffix]
else:
enabled_languages = form_data.get('object__name__text_languages', [])
if 'en' not in enabled_languages:
enabled_languages.append('en')
for language_code in enabled_languages:
batch_base_name = form_data.get('object__name__text_' + language_code, '')
raw_form_data['object__name__text_' + language_code] = [batch_base_name + example_name_suffix]
else:
batch_base_name = None
name_suffix_format = None
object_data, object_errors = parse_form_data(raw_form_data, schema)
errors += object_errors
if object_data is not None and not errors:
try:
validate(object_data, schema)
except ValidationError:
# TODO: proper logging
print('object schema validation failed')
# TODO: handle error
flask.abort(400)
for markdown in logic.markdown_to_html.get_markdown_from_object_data(object_data):
markdown_as_html = logic.markdown_to_html.markdown_to_safe_html(markdown)
logic.markdown_images.mark_referenced_markdown_images_as_permanent(markdown_as_html)
if object is None:
if schema.get('batch', False) and num_objects_in_batch is not None:
if 'name' in object_data and 'text' in object_data['name'] and name_suffix_format is not None and batch_base_name is not None:
data_sequence = []
for i in range(1, num_objects_in_batch + 1):
if name_suffix_format:
name_suffix = name_suffix_format.format(i)
else:
name_suffix = ''
object_data['name']['text'] = batch_base_name + name_suffix
data_sequence.append(deepcopy(object_data))
else:
data_sequence = [object_data] * num_objects_in_batch
objects = create_object_batch(
action_id=action.id,
data_sequence=data_sequence,
user_id=flask_login.current_user.id,
copy_permissions_object_id=copy_permissions_object_id,
permissions_for_group_id=permissions_for_group_id,
permissions_for_project_id=permissions_for_project_id
)
object_ids = [object.id for object in objects]
if category_ids is not None:
log_entry = logic.instrument_log_entries.create_instrument_log_entry(
instrument_id=action.instrument.id,
user_id=flask_login.current_user.id,
content='',
category_ids=category_ids
)
for object_id in object_ids:
logic.instrument_log_entries.create_instrument_log_object_attachment(
instrument_log_entry_id=log_entry.id,
object_id=object_id
)
flask.flash(_('The objects were created successfully.'), 'success')
return flask.redirect(flask.url_for('.objects', ids=','.join([str(object_id) for object_id in object_ids])))
else:
object = create_object(
action_id=action.id,
data=object_data,
user_id=flask_login.current_user.id,
previous_object_id=previous_object_id,
schema=previous_object_schema,
copy_permissions_object_id=copy_permissions_object_id,
permissions_for_group_id=permissions_for_group_id,
permissions_for_project_id=permissions_for_project_id
)
if category_ids is not None:
log_entry = logic.instrument_log_entries.create_instrument_log_entry(
instrument_id=action.instrument.id,
user_id=flask_login.current_user.id,
content='',
category_ids=category_ids
)
logic.instrument_log_entries.create_instrument_log_object_attachment(
instrument_log_entry_id=log_entry.id,
object_id=object.id
)
flask.flash(_('The object was created successfully.'), 'success')
else:
if object_data != object.data or schema != object.schema:
update_object(object_id=object.id, user_id=flask_login.current_user.id, data=object_data, schema=schema)
flask.flash(_('The object was updated successfully.'), 'success')
return flask.redirect(flask.url_for('.object', object_id=object.id))
elif any(name.startswith('action_object__') and (name.endswith('__delete') or name.endswith('__add') or name.endswith('__addcolumn') or name.endswith('__deletecolumn')) for name in form_data):
action = [name for name in form_data if name.startswith('action_')][0]
previous_actions.append(action)
if previous_actions:
try:
for action in previous_actions:
apply_action_to_data(action, data, schema)
form_data = apply_action_to_form_data(previous_actions[-1], form_data)
except ValueError:
flask.abort(400)
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
referencable_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ
)
if object is not None:
referencable_objects = [
referencable_object
for referencable_object in referencable_objects
if referencable_object.object_id != object.object_id
]
else:
referencable_objects = []
existing_objects = []
sorted_actions = get_sorted_actions_for_user(
user_id=flask_login.current_user.id
)
action_type_id_by_action_id = {}
for action_type in get_action_types():
for action in get_actions(action_type.id):
action_type_id_by_action_id[action.id] = action_type.id
tags = [{'name': tag.name, 'uses': tag.uses} for tag in logic.tags.get_tags()]
users = get_users(exclude_hidden=True)
users.sort(key=lambda user: user.id)
english = get_language(Language.ENGLISH)
if object is None:
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
existing_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.GRANT
)
user_groups = logic.groups.get_user_groups(flask_login.current_user.id)
user_projects = logic.projects.get_user_projects(flask_login.current_user.id, include_groups=True)
return flask.render_template(
'objects/forms/form_create.html',
action_id=action_id,
schema=schema,
data=data,
errors=errors,
object_errors=object_errors,
form_data=form_data,
previous_actions=serializer.dumps(previous_actions),
form=form,
can_copy_permissions=True,
existing_objects=existing_objects,
user_groups=user_groups,
user_projects=user_projects,
referencable_objects=referencable_objects,
sorted_actions=sorted_actions,
action_type_id_by_action_id=action_type_id_by_action_id,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
ActionType=models.ActionType,
datetime=datetime,
tags=tags,
users=users,
may_create_log_entry=may_create_log_entry,
instrument_log_categories=instrument_log_categories,
create_log_entry_default=create_log_entry_default,
previous_object_id=previous_object_id,
has_grant_for_previous_object=has_grant_for_previous_object,
languages=get_languages(only_enabled_for_input=True),
ENGLISH=english
)
else:
return flask.render_template(
'objects/forms/form_edit.html',
schema=schema,
data=data,
object_id=object.object_id,
errors=errors,
object_errors=object_errors,
form_data=form_data,
previous_actions=serializer.dumps(previous_actions),
form=form,
referencable_objects=referencable_objects,
sorted_actions=sorted_actions,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
action_type_id_by_action_id=action_type_id_by_action_id,
ActionType=models.ActionType,
datetime=datetime,
tags=tags,
users=users,
mode=mode,
languages=get_languages(),
ENGLISH=english
)
def build_object_location_assignment_confirmation_url(object_location_assignment_id: int) -> str:
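    """Return the external URL a responsible user can open to confirm the given object location assignment, secured by a signed token."""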
confirmation_url = flask.url_for(
'frontend.accept_responsibility_for_object',
t=logic.security_tokens.generate_token(
object_location_assignment_id,
salt='confirm_responsibility',
secret_key=flask.current_app.config['SECRET_KEY']
),
_external=True
)
return confirmation_url
def get_project_if_it_exists(project_id):
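    """Return the project with the given ID, or None if it does not exist."""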
try:
return get_project(project_id)
except logic.errors.ProjectDoesNotExistError:
return None
def show_inline_edit(obj, action):
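    """Render the object page in inline edit mode, combining the regular view context with the edit form context."""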
# Set view attributes
related_objects_tree = logic.object_relationships.build_related_objects_tree(obj.id, flask_login.current_user.id)
user_language_id = get_user_language(flask_login.current_user).id
english = get_language(Language.ENGLISH)
object_id = obj.id
user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
user_may_grant = Permissions.GRANT in user_permissions
user_may_use_as_template = Permissions.READ in get_user_action_permissions(obj.action_id, user_id=flask_login.current_user.id)
    new_schema_available = action.schema != obj.schema
instrument = get_instrument_with_translation_in_language(action.instrument_id,
user_language_id) if action.instrument else None
object_type = get_action_type_with_translation_in_language(
action_type_id=action.type_id,
language_id=user_language_id
).translation.object_name
object_log_entries = object_log.get_object_log_entries(object_id=obj.id, user_id=flask_login.current_user.id)
dataverse_enabled = bool(flask.current_app.config['DATAVERSE_URL'])
if dataverse_enabled:
dataverse_url = logic.dataverse_export.get_dataverse_url(obj.id)
show_dataverse_export = not dataverse_url
else:
dataverse_url = None
show_dataverse_export = False
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
token = serializer.dumps([flask_login.current_user.id, object_id])
mobile_upload_url = flask.url_for('.mobile_file_upload', object_id=object_id, token=token, _external=True)
mobile_upload_qrcode = generate_qrcode(mobile_upload_url, should_cache=False)
object_url = flask.url_for('.object', object_id=object_id, _external=True)
object_qrcode = generate_qrcode(object_url, should_cache=True)
location_form = ObjectLocationAssignmentForm()
locations_map, locations_tree = get_locations_tree()
locations = [('-1', '—')]
unvisited_location_ids_prefixes_and_subtrees = [(location_id, '', locations_tree[location_id]) for location_id in
locations_tree]
while unvisited_location_ids_prefixes_and_subtrees:
location_id, prefix, subtree = unvisited_location_ids_prefixes_and_subtrees.pop(0)
location = locations_map[location_id]
locations.append(
(str(location_id), '{}{} (#{})'.format(prefix, get_translated_text(location.name), location.id)))
for location_id in sorted(subtree, key=lambda location_id: get_translated_text(locations_map[location_id].name),
reverse=True):
unvisited_location_ids_prefixes_and_subtrees.insert(
0, (location_id, f'{prefix}{get_translated_text(location.name)} / ', subtree[location_id])
)
location_form.location.choices = locations
possible_responsible_users = [('-1', '—')]
for user in logic.users.get_users(exclude_hidden=True):
possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
location_form.responsible_user.choices = possible_responsible_users
measurement_actions = logic.action_translations.get_actions_with_translation_in_language(user_language_id,
models.ActionType.MEASUREMENT,
use_fallback=True)
favorite_action_ids = logic.favorites.get_user_favorite_action_ids(flask_login.current_user.id)
favorite_measurement_actions = [
action
for action in measurement_actions
if action.id in favorite_action_ids and not action.is_hidden
]
# Sort by: instrument name (independent actions first), action name
favorite_measurement_actions.sort(key=lambda action: (
action.user.name.lower() if action.user else '',
get_instrument_with_translation_in_language(action.instrument_id,
user_language_id).translation.name.lower() if action.instrument else '',
action.translation.name.lower()
))
publication_form = ObjectPublicationForm()
object_publications = logic.publications.get_publications_for_object(object_id=obj.id)
user_may_link_publication = True
notebook_templates = get_notebook_templates(
object_id=obj.id,
data=obj.data,
schema=obj.schema,
user_id=flask_login.current_user.id
)
linked_project = logic.projects.get_project_linked_to_object(object_id)
object_languages = logic.languages.get_languages_in_object_data(obj.data)
languages = []
for lang_code in object_languages:
languages.append(get_language_by_lang_code(lang_code))
all_languages = get_languages()
metadata_language = flask.request.args.get('language', None)
if not any(
language.lang_code == metadata_language
for language in languages
):
metadata_language = None
view_kwargs = {
"template_mode": "inline_edit",
"show_object_type_and_id_on_object_page_text": get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TYPE_AND_ID_ON_OBJECT_PAGE"],
"show_object_title": get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TITLE"],
"measurement_type_name": logic.action_type_translations.get_action_type_translation_for_action_type_in_language(
action_type_id=logic.actions.models.ActionType.MEASUREMENT,
language_id=logic.languages.get_user_language(flask_login.current_user).id,
use_fallback=True
).name,
"metadata_language": metadata_language,
"languages": languages,
"all_languages": all_languages,
"SUPPORTED_LOCALES": logic.locale.SUPPORTED_LOCALES,
"ENGLISH": english,
"object_type": object_type,
"action": action,
"action_type": get_action_type_with_translation_in_language(action.type_id, user_language_id),
"instrument": instrument,
"schema": obj.schema,
"data": obj.data,
"object_log_entries": object_log_entries,
"ObjectLogEntryType": ObjectLogEntryType,
"last_edit_datetime": obj.utc_datetime,
"last_edit_user": get_user(obj.user_id),
"object_id": object_id,
"user_may_edit": True,
"user_may_comment": True,
"comments": comments.get_comments_for_object(object_id),
"comment_form": CommentForm(),
"files": logic.files.get_files_for_object(object_id),
"file_source_instrument_exists": False,
"file_source_jupyterhub_exists": False,
"file_form": FileForm(),
"external_link_form": ExternalLinkForm(),
"external_link_invalid": 'invalid_link' in flask.request.args,
"mobile_upload_url": mobile_upload_url,
"mobile_upload_qrcode": mobile_upload_qrcode,
"notebook_templates": notebook_templates,
"object_qrcode": object_qrcode,
"object_url": object_url,
"restore_form": None,
"version_id": obj.version_id,
"user_may_grant": user_may_grant,
"favorite_measurement_actions": favorite_measurement_actions,
"FileLogEntryType": FileLogEntryType,
"file_information_form": FileInformationForm(),
"file_hiding_form": FileHidingForm(),
"new_schema_available": new_schema_available,
"related_objects_tree": related_objects_tree,
"object_publications": object_publications,
"user_may_link_publication": user_may_link_publication,
"user_may_use_as_template": user_may_use_as_template,
"show_dataverse_export": show_dataverse_export,
"dataverse_url": dataverse_url,
"publication_form": publication_form,
"get_object": get_object,
"get_object_if_current_user_has_read_permissions": get_object_if_current_user_has_read_permissions,
"get_object_location_assignment": get_object_location_assignment,
"get_user": get_user,
"get_location": get_location,
"PAGE_SIZES": PAGE_SIZES,
"HORIZONTAL_LABEL_MARGIN": HORIZONTAL_LABEL_MARGIN,
"VERTICAL_LABEL_MARGIN": VERTICAL_LABEL_MARGIN,
"mm": mm,
"object_location_assignments": get_object_location_assignments(object_id),
"build_object_location_assignment_confirmation_url": build_object_location_assignment_confirmation_url,
"user_may_assign_location": True,
"location_form": location_form,
"project": linked_project,
"get_project": get_project_if_it_exists,
"get_action_type": get_action_type,
"get_action_type_with_translation_in_language": get_action_type_with_translation_in_language,
"get_instrument_with_translation_in_language": get_instrument_with_translation_in_language
}
# form kwargs
if action is not None and action.instrument is not None and flask_login.current_user in action.instrument.responsible_users:
instrument_log_categories = logic.instrument_log_entries.get_instrument_log_categories(action.instrument.id)
if 'create_instrument_log_entry' in flask.request.form:
category_ids = []
for category_id in flask.request.form.getlist('instrument_log_categories'):
try:
if int(category_id) in [category.id for category in instrument_log_categories]:
category_ids.append(int(category_id))
except Exception:
pass
errors = []
object_errors = {}
form_data = {}
previous_actions = []
serializer = itsdangerous.URLSafeSerializer(flask.current_app.config['SECRET_KEY'])
form = ObjectForm()
if flask.request.method != 'GET' and form.validate_on_submit():
raw_form_data = {key: flask.request.form.getlist(key) for key in flask.request.form}
form_data = {k: v[0] for k, v in raw_form_data.items()}
if 'input_num_batch_objects' in form_data:
try:
num_objects_in_batch = int(form_data['input_num_batch_objects'])
except ValueError:
try:
                    # The form allows notations like '1.2e1' for '12', which
                    # int() rejects, so fall back to parsing them as floats
num_objects_in_batch = float(form_data['input_num_batch_objects'])
if num_objects_in_batch == int(num_objects_in_batch):
num_objects_in_batch = int(num_objects_in_batch)
else:
raise
except ValueError:
errors.append('input_num_batch_objects')
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
else:
form_data['input_num_batch_objects'] = str(num_objects_in_batch)
if 'previous_actions' in flask.request.form:
try:
previous_actions = serializer.loads(flask.request.form['previous_actions'])
except itsdangerous.BadData:
flask.abort(400)
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
referencable_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ
)
if object is not None:
referencable_objects = [
referencable_object
for referencable_object in referencable_objects
if referencable_object.object_id != object_id
]
else:
referencable_objects = []
sorted_actions = get_sorted_actions_for_user(
user_id=flask_login.current_user.id
)
for action in sorted_actions:
db.session.expunge(action)
action_type_id_by_action_id = {}
for action_type in get_action_types():
for action in get_actions(action_type.id):
action_type_id_by_action_id[action.id] = action_type.id
tags = [{'name': tag.name, 'uses': tag.uses} for tag in logic.tags.get_tags()]
users = get_users(exclude_hidden=True)
users.sort(key=lambda user: user.id)
english = get_language(Language.ENGLISH)
form_kwargs = {
"errors": errors,
"object_errors": object_errors,
"form_data": form_data,
"previous_actions": serializer.dumps(previous_actions),
"form": form,
"referencable_objects": referencable_objects,
"sorted_actions": sorted_actions,
"action_type_id_by_action_id": action_type_id_by_action_id,
"ActionType": models.ActionType,
"datetime": datetime,
"tags": tags,
"users": users,
"mode": 'edit',
"languages": get_languages(),
"ENGLISH": english
}
kwargs = {**view_kwargs, **form_kwargs}
return flask.render_template('objects/inline_edit/inline_edit_base.html', **kwargs)
def get_object_if_current_user_has_read_permissions(object_id):
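    """Return the object if the current user has READ permissions for it, otherwise None."""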
user_id = flask_login.current_user.id
try:
permissions = get_user_object_permissions(object_id, user_id)
except ObjectDoesNotExistError:
return None
if Permissions.READ not in permissions:
return None
return get_object(object_id)
@frontend.route('/objects/<int:object_id>', methods=['GET', 'POST'])
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object(object_id):
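    """
    Show the page for an individual object.

    Depending on the mode query parameter and the user's permissions, this
    renders the read-only view, the inline edit view, or the full edit or
    schema upgrade form.
    """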
object = get_object(object_id=object_id)
related_objects_tree = logic.object_relationships.build_related_objects_tree(object_id, flask_login.current_user.id)
user_language_id = get_user_language(flask_login.current_user).id
english = get_language(Language.ENGLISH)
object_languages = set()
user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
user_may_edit = Permissions.WRITE in user_permissions
user_may_grant = Permissions.GRANT in user_permissions
user_may_use_as_template = Permissions.READ in get_user_action_permissions(object.action_id, user_id=flask_login.current_user.id)
action = get_action_with_translation_in_language(object.action_id, user_language_id, use_fallback=True)
    new_schema_available = action.schema != object.schema
if not user_may_edit and flask.request.args.get('mode', '') == 'edit':
return flask.abort(403)
if not user_may_edit and flask.request.args.get('mode', '') == 'upgrade':
return flask.abort(403)
if not flask.current_app.config['DISABLE_INLINE_EDIT']:
if not user_may_edit and flask.request.args.get('mode', '') == 'inline_edit':
return flask.abort(403)
if user_may_edit and flask.request.method == 'GET' and flask.request.args.get('mode', '') in {'', 'inline_edit'}:
return show_inline_edit(object, get_action(object.action_id))
if flask.request.method == 'GET' and flask.request.args.get('mode', '') not in ('edit', 'upgrade'):
instrument = get_instrument_with_translation_in_language(action.instrument_id, user_language_id) if action.instrument else None
object_type = get_action_type_with_translation_in_language(
action_type_id=action.type_id,
language_id=user_language_id
).translation.object_name
object_log_entries = object_log.get_object_log_entries(object_id=object_id, user_id=flask_login.current_user.id)
dataverse_enabled = bool(flask.current_app.config['DATAVERSE_URL'])
if dataverse_enabled:
dataverse_url = logic.dataverse_export.get_dataverse_url(object_id)
show_dataverse_export = user_may_grant and not dataverse_url
else:
dataverse_url = None
show_dataverse_export = False
if user_may_edit:
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
token = serializer.dumps([flask_login.current_user.id, object_id])
mobile_upload_url = flask.url_for('.mobile_file_upload', object_id=object_id, token=token, _external=True)
mobile_upload_qrcode = generate_qrcode(mobile_upload_url, should_cache=False)
else:
mobile_upload_url = None
mobile_upload_qrcode = None
object_url = flask.url_for('.object', object_id=object_id, _external=True)
object_qrcode = generate_qrcode(object_url, should_cache=True)
location_form = ObjectLocationAssignmentForm()
locations_map, locations_tree = get_locations_tree()
locations = [('-1', '—')]
unvisited_location_ids_prefixes_and_subtrees = [(location_id, '', locations_tree[location_id]) for location_id in locations_tree]
while unvisited_location_ids_prefixes_and_subtrees:
location_id, prefix, subtree = unvisited_location_ids_prefixes_and_subtrees.pop(0)
location = locations_map[location_id]
locations.append((str(location_id), '{}{} (#{})'.format(prefix, get_translated_text(location.name), location.id)))
for location_id in sorted(subtree, key=lambda location_id: get_translated_text(locations_map[location_id].name), reverse=True):
unvisited_location_ids_prefixes_and_subtrees.insert(0, (location_id, '{}{} / '.format(prefix, get_translated_text(location.name)), subtree[location_id]))
location_form.location.choices = locations
possible_responsible_users = [('-1', '—')]
for user in logic.users.get_users(exclude_hidden=True):
possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
location_form.responsible_user.choices = possible_responsible_users
measurement_actions = logic.action_translations.get_actions_with_translation_in_language(user_language_id, models.ActionType.MEASUREMENT, use_fallback=True)
favorite_action_ids = logic.favorites.get_user_favorite_action_ids(flask_login.current_user.id)
favorite_measurement_actions = [
action
for action in measurement_actions
if action.id in favorite_action_ids and not action.is_hidden
]
# Sort by: instrument name (independent actions first), action name
favorite_measurement_actions.sort(key=lambda action: (
action.user.name.lower() if action.user else '',
get_instrument_with_translation_in_language(action.instrument_id, user_language_id).translation.name.lower() if action.instrument else '',
action.translation.name.lower()
))
publication_form = ObjectPublicationForm()
object_publications = logic.publications.get_publications_for_object(object_id=object.id)
user_may_link_publication = Permissions.WRITE in user_permissions
notebook_templates = get_notebook_templates(
object_id=object.id,
data=object.data,
schema=object.schema,
user_id=flask_login.current_user.id
)
linked_project = logic.projects.get_project_linked_to_object(object_id)
object_languages = logic.languages.get_languages_in_object_data(object.data)
languages = []
for lang_code in object_languages:
languages.append(get_language_by_lang_code(lang_code))
all_languages = get_languages()
metadata_language = flask.request.args.get('language', None)
if not any(
language.lang_code == metadata_language
for language in languages
):
metadata_language = None
return flask.render_template(
'objects/view/base.html',
template_mode="view",
show_object_type_and_id_on_object_page_text=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TYPE_AND_ID_ON_OBJECT_PAGE"],
show_object_title=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TITLE"],
measurement_type_name=logic.action_type_translations.get_action_type_translation_for_action_type_in_language(
action_type_id=logic.actions.models.ActionType.MEASUREMENT,
language_id=logic.languages.get_user_language(flask_login.current_user).id,
use_fallback=True
).name,
metadata_language=metadata_language,
languages=languages,
all_languages=all_languages,
SUPPORTED_LOCALES=logic.locale.SUPPORTED_LOCALES,
ENGLISH=english,
object_type=object_type,
action=action,
action_type=get_action_type_with_translation_in_language(action.type_id, user_language_id),
instrument=instrument,
schema=object.schema,
data=object.data,
object_log_entries=object_log_entries,
ObjectLogEntryType=ObjectLogEntryType,
last_edit_datetime=object.utc_datetime,
last_edit_user=get_user(object.user_id),
object_id=object_id,
user_may_edit=user_may_edit,
user_may_comment=user_may_edit,
comments=comments.get_comments_for_object(object_id),
comment_form=CommentForm(),
files=logic.files.get_files_for_object(object_id),
file_source_instrument_exists=False,
file_source_jupyterhub_exists=False,
file_form=FileForm(),
external_link_form=ExternalLinkForm(),
external_link_invalid='invalid_link' in flask.request.args,
mobile_upload_url=mobile_upload_url,
mobile_upload_qrcode=mobile_upload_qrcode,
notebook_templates=notebook_templates,
object_qrcode=object_qrcode,
object_url=object_url,
restore_form=None,
version_id=object.version_id,
user_may_grant=user_may_grant,
favorite_measurement_actions=favorite_measurement_actions,
FileLogEntryType=FileLogEntryType,
file_information_form=FileInformationForm(),
file_hiding_form=FileHidingForm(),
new_schema_available=new_schema_available,
related_objects_tree=related_objects_tree,
object_publications=object_publications,
user_may_link_publication=user_may_link_publication,
user_may_use_as_template=user_may_use_as_template,
show_dataverse_export=show_dataverse_export,
dataverse_url=dataverse_url,
publication_form=publication_form,
get_object=get_object,
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
get_object_location_assignment=get_object_location_assignment,
get_user=get_user,
get_location=get_location,
PAGE_SIZES=PAGE_SIZES,
HORIZONTAL_LABEL_MARGIN=HORIZONTAL_LABEL_MARGIN,
VERTICAL_LABEL_MARGIN=VERTICAL_LABEL_MARGIN,
mm=mm,
object_location_assignments=get_object_location_assignments(object_id),
build_object_location_assignment_confirmation_url=build_object_location_assignment_confirmation_url,
user_may_assign_location=user_may_edit,
location_form=location_form,
project=linked_project,
get_project=get_project_if_it_exists,
get_action_type=get_action_type,
get_action_type_with_translation_in_language=get_action_type_with_translation_in_language,
get_instrument_with_translation_in_language=get_instrument_with_translation_in_language
)
check_current_user_is_not_readonly()
if flask.request.args.get('mode', '') == 'upgrade':
should_upgrade_schema = True
else:
should_upgrade_schema = False
return show_object_form(object, action=get_action(object.action_id), should_upgrade_schema=should_upgrade_schema)
@frontend.route('/objects/<int:object_id>/dc.rdf')
@frontend.route('/objects/<int:object_id>/versions/<int:version_id>/dc.rdf')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_rdf(object_id, version_id=None):
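    """Return Dublin Core metadata for an object (or one of its versions) as RDF/XML."""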
rdf_xml = logic.rdf.generate_rdf(flask_login.current_user.id, object_id, version_id)
return flask.Response(
rdf_xml,
mimetype='application/rdf+xml',
)
@frontend.route('/objects/<int:object_id>/label')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def print_object_label(object_id):
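    """
    Generate a PDF with labels for an object.

    The mode query parameter selects fixed-width labels, minimum-height
    labels or the default mixed label sheet; further parameters control the
    paper format, label dimensions and QR code inclusion.
    """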
mode = flask.request.args.get('mode', 'mixed')
if mode == 'fixed-width':
create_mixed_labels = False
create_long_labels = False
include_qrcode_in_long_labels = None
paper_format = flask.request.args.get('width-paper-format', '')
if paper_format not in PAGE_SIZES:
paper_format = DEFAULT_PAPER_FORMAT
maximum_width = math.floor(PAGE_SIZES[paper_format][0] / mm - 2 * HORIZONTAL_LABEL_MARGIN)
maximum_height = math.floor(PAGE_SIZES[paper_format][1] / mm - 2 * VERTICAL_LABEL_MARGIN)
ghs_classes_side_by_side = 'side-by-side' in flask.request.args
label_minimum_width = 20
if ghs_classes_side_by_side:
label_minimum_width = 40
try:
label_width = float(flask.request.args.get('label-width', '20'))
except ValueError:
label_width = 0
if math.isnan(label_width):
label_width = 0
if label_width < label_minimum_width:
label_width = label_minimum_width
if label_width > maximum_width:
label_width = maximum_width
try:
label_minimum_height = float(flask.request.args.get('label-minimum-height', '0'))
except ValueError:
label_minimum_height = 0
if math.isnan(label_minimum_height):
label_minimum_height = 0
if label_minimum_height < 0:
label_minimum_height = 0
if label_minimum_height > maximum_height:
label_minimum_height = maximum_height
qrcode_width = 18
centered = 'centered' in flask.request.args
elif mode == 'minimum-height':
create_mixed_labels = False
create_long_labels = True
paper_format = flask.request.args.get('height-paper-format', '')
if paper_format not in PAGE_SIZES:
paper_format = DEFAULT_PAPER_FORMAT
maximum_width = math.floor(PAGE_SIZES[paper_format][0] / mm - 2 * HORIZONTAL_LABEL_MARGIN)
include_qrcode_in_long_labels = 'include-qrcode' in flask.request.args
label_width = 0
label_minimum_height = 0
try:
label_minimum_width = float(flask.request.args.get('label-minimum-width', '0'))
except ValueError:
label_minimum_width = 0
if math.isnan(label_minimum_width):
label_minimum_width = 0
if label_minimum_width < 0:
label_minimum_width = 0
if label_minimum_width > maximum_width:
label_minimum_width = maximum_width
qrcode_width = 0
ghs_classes_side_by_side = None
centered = None
else:
create_mixed_labels = True
create_long_labels = None
include_qrcode_in_long_labels = None
paper_format = flask.request.args.get('mixed-paper-format', '')
if paper_format not in PAGE_SIZES:
paper_format = DEFAULT_PAPER_FORMAT
label_width = 0
label_minimum_height = 0
qrcode_width = 0
label_minimum_width = 0
ghs_classes_side_by_side = None
centered = None
object = get_object(object_id=object_id)
object_log_entries = object_log.get_object_log_entries(object_id=object_id, user_id=flask_login.current_user.id)
for object_log_entry in object_log_entries:
if object_log_entry.type in (ObjectLogEntryType.CREATE_OBJECT, ObjectLogEntryType.CREATE_BATCH):
creation_date = object_log_entry.utc_datetime.strftime('%Y-%m-%d')
creation_user = get_user(object_log_entry.user_id).name
break
else:
creation_date = _('Unknown')
creation_user = _('Unknown')
if 'created' in object.data and '_type' in object.data['created'] and object.data['created']['_type'] == 'datetime':
creation_date = object.data['created']['utc_datetime'].split(' ')[0]
if 'name' in object.data and '_type' in object.data['name'] and object.data['name']['_type'] == 'text':
object_name = get_translated_text(object.data['name']['text'])
else:
object_name = _('Unknown Object')
object_url = flask.url_for('.object', object_id=object_id, _external=True)
if 'hazards' in object.data and '_type' in object.data['hazards'] and object.data['hazards']['_type'] == 'hazards':
hazards = object.data['hazards']['hazards']
else:
hazards = []
pdf_data = create_labels(
object_id=object_id,
object_name=object_name,
object_url=object_url,
creation_user=creation_user,
creation_date=creation_date,
ghs_classes=hazards,
paper_format=paper_format,
create_mixed_labels=create_mixed_labels,
create_long_labels=create_long_labels,
include_qrcode_in_long_labels=include_qrcode_in_long_labels,
label_width=label_width,
label_minimum_height=label_minimum_height,
label_minimum_width=label_minimum_width,
qrcode_width=qrcode_width,
ghs_classes_side_by_side=ghs_classes_side_by_side,
centered=centered
)
return flask.send_file(
io.BytesIO(pdf_data),
mimetype='application/pdf',
cache_timeout=-1
)
@frontend.route('/objects/<int:object_id>/comments/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_comments(object_id):
check_current_user_is_not_readonly()
comment_form = CommentForm()
if comment_form.validate_on_submit():
content = comment_form.content.data
comments.create_comment(object_id=object_id, user_id=flask_login.current_user.id, content=content)
flask.flash(_('Successfully posted a comment.'), 'success')
else:
flask.flash(_('Please enter a comment text.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
@frontend.route('/objects/search/')
@flask_login.login_required
def search():
actions = get_sorted_actions_for_user(
user_id=flask_login.current_user.id
)
user_language_id = get_user_language(flask_login.current_user).id
action_types = get_action_types_with_translations_in_language(user_language_id)
search_paths = {}
search_paths_by_action = {}
search_paths_by_action_type = {}
for action_type in action_types:
search_paths_by_action_type[action_type.id] = {}
for action in actions:
search_paths_by_action[action.id] = {}
if action.type_id not in search_paths_by_action_type:
search_paths_by_action_type[action.type_id] = {}
for property_path, property_type in logic.schemas.utils.get_property_paths_for_schema(
schema=action.schema,
valid_property_types={'text', 'bool', 'quantity', 'datetime'}
).items():
property_path = '.'.join(
key if key is not None else '?'
for key in property_path
)
search_paths_by_action[action.id][property_path] = [property_type]
if property_path not in search_paths_by_action_type[action.type_id]:
search_paths_by_action_type[action.type_id][property_path] = [property_type]
elif property_type not in search_paths_by_action_type[action.type_id][property_path]:
search_paths_by_action_type[action.type_id][property_path].append(property_type)
if property_path not in search_paths:
search_paths[property_path] = [property_type]
elif property_type not in search_paths[property_path]:
search_paths[property_path].append(property_type)
return flask.render_template(
'search.html',
search_paths=search_paths,
search_paths_by_action=search_paths_by_action,
search_paths_by_action_type=search_paths_by_action_type,
actions=actions,
action_types=action_types,
datetime=datetime
), 200, {
'Cache-Control': 'no-cache, no-store, must-revalidate',
'Pragma': 'no-cache',
'Expires': '0'
}
@frontend.route('/objects/referencable')
@flask_login.login_required
def referencable_objects():
required_perm = Permissions.READ
if 'required_perm' in flask.request.args:
try:
required_perm = Permissions.from_name(flask.request.args['required_perm'])
except ValueError:
try:
required_perm = Permissions(int(flask.request.args['required_perm']))
except ValueError:
return {
"message": "argument {} is not a valid permission.".format(flask.request.args['required_perm'])
}, 400
referencable_objects = get_object_info_with_permissions(
user_id=flask_login.current_user.id,
permissions=required_perm,
)
def dictify(x):
return {
'id': x.object_id,
'text': flask.escape('{} (#{})'.format(get_translated_text(x.name_json), x.object_id)),
'action_id': x.action_id,
'max_permission': x.max_permission,
'tags': [flask.escape(tag) for tag in x.tags['tags']] if x.tags and isinstance(x.tags, dict) and x.tags.get('_type') == 'tags' and x.tags.get('tags') else []
}
return {'referencable_objects': [dictify(x) for x in referencable_objects]}
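# Illustrative note (not part of the original module): the route above returns JSON shaped
# roughly as below; the concrete values are assumptions for illustration only.
#   {"referencable_objects": [
#       {"id": 42, "text": "Sample A (#42)", "action_id": 3,
#        "max_permission": <permission>, "tags": ["tag1", "tag2"]}
#   ]}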
@frontend.route('/objects/<int:object_id>/permissions/request', methods=['POST'])
@flask_login.login_required
def object_permissions_request(object_id):
current_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
if Permissions.READ in current_permissions:
flask.flash(_('You already have permissions to access this object.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
request_object_permissions(flask_login.current_user.id, object_id)
flask.flash(_('Your request for permissions has been sent.'), 'success')
return flask.redirect(flask.url_for('.objects'))
@frontend.route('/objects/<int:object_id>/locations/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_location(object_id):
check_current_user_is_not_readonly()
location_form = ObjectLocationAssignmentForm()
location_form.location.choices = [('-1', '—')] + [
(str(location.id), '{} (#{})'.format(location.name, location.id))
for location in get_locations()
]
possible_responsible_users = [('-1', '—')]
for user in logic.users.get_users(exclude_hidden=True):
possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
location_form.responsible_user.choices = possible_responsible_users
if location_form.validate_on_submit():
location_id = int(location_form.location.data)
if location_id < 0:
location_id = None
responsible_user_id = int(location_form.responsible_user.data)
if responsible_user_id < 0:
responsible_user_id = None
description = location_form.description.data
try:
description = json.loads(description)
except Exception:
description = {}
valid_description = {'en': ''}
for language_code, description_text in description.items():
if not isinstance(language_code, str):
continue
try:
language = get_language_by_lang_code(language_code)
except logic.errors.LanguageDoesNotExistError:
continue
if not language.enabled_for_input:
continue
valid_description[language_code] = description_text
description = valid_description
if location_id is not None or responsible_user_id is not None:
assign_location_to_object(object_id, location_id, responsible_user_id, flask_login.current_user.id, description)
flask.flash(_('Successfully assigned a new location to this object.'), 'success')
else:
flask.flash(_('Please select a location or a responsible user.'), 'error')
else:
flask.flash(_('Please select a location or a responsible user.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
@frontend.route('/objects/<int:object_id>/publications/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_publication(object_id):
check_current_user_is_not_readonly()
publication_form = ObjectPublicationForm()
if publication_form.validate_on_submit():
doi = publication_form.doi.data
title = publication_form.title.data
object_name = publication_form.object_name.data
if title is not None:
title = title.strip()
if not title:
title = None
if object_name is not None:
object_name = object_name.strip()
if not object_name:
object_name = None
existing_publication = ([
publication
for publication in logic.publications.get_publications_for_object(object_id)
if publication.doi == doi
] or [None])[0]
if existing_publication is not None and existing_publication.title == title and existing_publication.object_name == object_name:
flask.flash(_('This object has already been linked to this publication.'), 'info')
else:
logic.publications.link_publication_to_object(user_id=flask_login.current_user.id, object_id=object_id, doi=doi, title=title, object_name=object_name)
if existing_publication is None:
flask.flash(_('Successfully linked this object to a publication.'), 'success')
else:
flask.flash(_('Successfully updated the information for this publication.'), 'success')
else:
flask.flash(_('Please enter a valid DOI for the publication you want to link this object to.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
@frontend.route('/objects/<int:object_id>/export')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def export_data(object_id):
object_ids = [object_id]
file_extension = flask.request.args.get('format', '.pdf')
if file_extension != '.pdf' and file_extension not in logic.export.FILE_FORMATS:
return flask.abort(400)
if 'object_ids' in flask.request.args:
try:
object_ids = json.loads(flask.request.args['object_ids'])
object_ids = [int(i) for i in object_ids]
if any((Permissions.READ not in get_user_object_permissions(i, flask_login.current_user.id)) for i in object_ids):
return flask.abort(400)
except Exception:
return flask.abort(400)
if not object_ids:
return flask.abort(400)
if file_extension == '.pdf':
sections = pdfexport.SECTIONS
if 'sections' in flask.request.args:
try:
sections = sections.intersection(json.loads(flask.request.args['sections']))
except Exception:
return flask.abort(400)
if 'language' in flask.request.args:
try:
lang_code = flask.request.args['language']
if lang_code not in logic.locale.SUPPORTED_LOCALES:
raise ValueError()
language = logic.languages.get_language_by_lang_code(lang_code)
if not language.enabled_for_user_interface:
raise ValueError()
except Exception:
lang_code = 'en'
else:
lang_code = 'en'
pdf_data = pdfexport.create_pdfexport(object_ids, sections, lang_code)
file_bytes = io.BytesIO(pdf_data)
elif file_extension in logic.export.FILE_FORMATS:
file_bytes = logic.export.FILE_FORMATS[file_extension][1](flask_login.current_user.id, object_ids=object_ids)
else:
file_bytes = None
if file_bytes:
return flask.Response(
file_bytes,
200,
headers={
'Content-Disposition': f'attachment; filename=sampledb_export{file_extension}',
'Content-Type': 'application/pdf' if file_extension == '.pdf' else logic.export.FILE_FORMATS[file_extension][2]
}
)
return flask.abort(500)
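# Illustrative request (parameter values assumed): GET /objects/1/export?format=.pdf&language=en
# exports a single object as PDF; any other extension must be a key of logic.export.FILE_FORMATS.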
@frontend.route('/objects/<int:object_id>/files/')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_files(object_id):
files = logic.files.get_files_for_object(object_id)
zip_bytes = io.BytesIO()
with zipfile.ZipFile(zip_bytes, 'w') as zip_file:
for file in files:
if file.is_hidden:
continue
if file.storage in {'local', 'database'}:
try:
file_bytes = file.open(read_only=True).read()
except Exception:
pass
else:
zip_file.writestr(os.path.basename(file.original_file_name), file_bytes)
return flask.Response(
zip_bytes.getvalue(),
200,
headers={
'Content-Type': 'application/zip',
'Content-Disposition': f'attachment; filename=object_{object_id}_files.zip'
}
)
@frontend.route('/objects/<int:object_id>/files/<int:file_id>', methods=['GET'])
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_file(object_id, file_id):
file = logic.files.get_file_for_object(object_id, file_id)
if file is None:
return flask.abort(404)
if file.is_hidden:
return flask.abort(403)
if file.storage in ('local', 'database'):
if 'preview' in flask.request.args:
file_extension = os.path.splitext(file.original_file_name)[1]
mime_type = flask.current_app.config.get('MIME_TYPES', {}).get(file_extension, None)
if mime_type is not None:
return flask.send_file(file.open(), mimetype=mime_type, last_modified=file.utc_datetime)
return flask.send_file(file.open(), as_attachment=True, attachment_filename=file.original_file_name, last_modified=file.utc_datetime)
# TODO: better error handling
return flask.abort(404)
@frontend.route('/objects/<int:object_id>/files/<int:file_id>', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def update_file_information(object_id, file_id):
check_current_user_is_not_readonly()
form = FileInformationForm()
if not form.validate_on_submit():
return flask.abort(400)
title = form.title.data
description = form.description.data
try:
logic.files.update_file_information(
object_id=object_id,
file_id=file_id,
user_id=flask_login.current_user.id,
title=title,
description=description
)
except logic.errors.FileDoesNotExistError:
return flask.abort(404)
return flask.redirect(flask.url_for('.object', object_id=object_id, _anchor='file-{}'.format(file_id)))
@frontend.route('/objects/<int:object_id>/files/<int:file_id>/hide', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def hide_file(object_id, file_id):
check_current_user_is_not_readonly()
form = FileHidingForm()
if not form.validate_on_submit():
return flask.abort(400)
reason = form.reason.data
try:
logic.files.hide_file(
object_id=object_id,
file_id=file_id,
user_id=flask_login.current_user.id,
reason=reason
)
except logic.errors.FileDoesNotExistError:
return flask.abort(404)
flask.flash(_('The file was hidden successfully.'), 'success')
return flask.redirect(flask.url_for('.object', object_id=object_id, _anchor='file-{}'.format(file_id)))
@frontend.route('/objects/<int:object_id>/files/mobile_upload/<token>', methods=['GET'])
def mobile_file_upload(object_id: int, token: str):
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
try:
user_id, object_id = serializer.loads(token, max_age=15 * 60)
except itsdangerous.BadSignature:
return flask.abort(400)
try:
user = logic.users.get_user(user_id)
except UserDoesNotExistError:
return flask.abort(403)
if user.is_readonly:
return flask.abort(403)
return flask.render_template('mobile_upload.html')
@frontend.route('/objects/<int:object_id>/files/mobile_upload/<token>', methods=['POST'])
def post_mobile_file_upload(object_id: int, token: str):
serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
try:
user_id, object_id = serializer.loads(token, max_age=15 * 60)
except itsdangerous.BadSignature:
return flask.abort(400)
try:
user = logic.users.get_user(user_id)
except UserDoesNotExistError:
return flask.abort(403)
if user.is_readonly:
return flask.abort(403)
files = flask.request.files.getlist('file_input')
if not files:
return flask.redirect(
flask.url_for(
'.mobile_file_upload',
object_id=object_id,
token=token
)
)
for file_storage in files:
file_name = werkzeug.utils.secure_filename(file_storage.filename)
logic.files.create_database_file(object_id, user_id, file_name, lambda stream: file_storage.save(dst=stream))
return flask.render_template('mobile_upload_success.html')
@frontend.route('/objects/<int:object_id>/files/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_files(object_id):
check_current_user_is_not_readonly()
external_link_form = ExternalLinkForm()
file_form = FileForm()
if file_form.validate_on_submit():
file_source = file_form.file_source.data
if file_source == 'local':
files = flask.request.files.getlist(file_form.local_files.name)
for file_storage in files:
file_name = werkzeug.utils.secure_filename(file_storage.filename)
logic.files.create_database_file(object_id, flask_login.current_user.id, file_name, lambda stream: file_storage.save(dst=stream))
flask.flash(_('Successfully uploaded files.'), 'success')
else:
flask.flash(_('Failed to upload files.'), 'error')
elif external_link_form.validate_on_submit():
url = external_link_form.url.data
logic.files.create_url_file(object_id, flask_login.current_user.id, url)
flask.flash(_('Successfully posted link.'), 'success')
elif external_link_form.errors:
flask.flash(_('Failed to post link.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id, invalid_link=True, _anchor='anchor-post-link'))
else:
flask.flash(_('Failed to upload files.'), 'error')
return flask.redirect(flask.url_for('.object', object_id=object_id))
@frontend.route('/objects/new', methods=['GET', 'POST'])
@flask_login.login_required
def new_object():
check_current_user_is_not_readonly()
action_id = flask.request.args.get('action_id', None)
previous_object_id = flask.request.args.get('previous_object_id', None)
if not action_id and not previous_object_id:
# TODO: handle error
return flask.abort(404)
sample_id = flask.request.args.get('sample_id', None)
previous_object = None
action = None
if previous_object_id:
try:
previous_object = get_object(previous_object_id)
except ObjectDoesNotExistError:
flask.flash(_("This object does not exist."), 'error')
return flask.abort(404)
if Permissions.READ not in get_user_object_permissions(user_id=flask_login.current_user.id, object_id=previous_object_id):
flask.flash(_("You do not have the required permissions to use this object as a template."), 'error')
return flask.abort(403)
if action_id:
if action_id != str(previous_object.action_id):
flask.flash(_("This object was created with a different action."), 'error')
return flask.abort(400)
else:
action_id = previous_object.action_id
if action_id:
try:
action = get_action(action_id)
except ActionDoesNotExistError:
flask.flash(_("This action does not exist."), 'error')
return flask.abort(404)
if Permissions.READ not in get_user_action_permissions(action_id, user_id=flask_login.current_user.id):
flask.flash(_("You do not have the required permissions to use this action."), 'error')
return flask.abort(403)
placeholder_data = {}
if sample_id is not None:
try:
sample_id = int(sample_id)
except ValueError:
sample_id = None
else:
if sample_id <= 0:
sample_id = None
if sample_id is not None:
try:
logic.objects.get_object(sample_id)
except logic.errors.ObjectDoesNotExistError:
sample_id = None
if sample_id is not None:
if action.schema.get('properties', {}).get('sample', {}).get('type', '') == 'sample':
placeholder_data = {
('sample', ): {'_type': 'sample', 'object_id': sample_id}
}
# TODO: check instrument permissions
return show_object_form(None, action, previous_object, placeholder_data=placeholder_data)
@frontend.route('/objects/<int:object_id>/versions/')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_versions(object_id):
object = get_object(object_id=object_id)
if object is None:
return flask.abort(404)
object_versions = get_object_versions(object_id=object_id)
object_versions.sort(key=lambda object_version: -object_version.version_id)
return flask.render_template('objects/object_versions.html', get_user=get_user, object=object, object_versions=object_versions)
@frontend.route('/objects/<int:object_id>/versions/<int:version_id>')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_version(object_id, version_id):
user_language_id = logic.languages.get_user_language(flask_login.current_user).id
english = get_language(Language.ENGLISH)
object = get_object(object_id=object_id, version_id=version_id)
form = None
user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
if Permissions.WRITE in user_permissions:
current_object = get_object(object_id=object_id)
if current_object.version_id != version_id:
form = ObjectVersionRestoreForm()
user_may_grant = Permissions.GRANT in user_permissions
action = get_action_with_translation_in_language(object.action_id, user_language_id, use_fallback=True)
action_type = get_action_type_with_translation_in_language(action.type_id, user_language_id)
instrument = get_instrument_with_translation_in_language(action.instrument_id, user_language_id) if action.instrument_id else None
object_languages = logic.languages.get_languages_in_object_data(object.data)
languages = []
for lang_code in object_languages:
languages.append(get_language_by_lang_code(lang_code))
metadata_language = flask.request.args.get('language', None)
if not any(
language.lang_code == metadata_language
for language in languages
):
metadata_language = None
return flask.render_template(
'objects/view/base.html',
template_mode="view",
show_object_type_and_id_on_object_page_text=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TYPE_AND_ID_ON_OBJECT_PAGE"],
show_object_title=get_user_settings(flask_login.current_user.id)["SHOW_OBJECT_TITLE"],
languages=languages,
metadata_language=metadata_language,
ENGLISH=english,
is_archived=True,
object_type=action_type.translation.object_name,
action=action,
action_type=action_type,
instrument=instrument,
schema=object.schema,
data=object.data,
last_edit_datetime=object.utc_datetime,
last_edit_user=get_user(object.user_id),
get_object_if_current_user_has_read_permissions=get_object_if_current_user_has_read_permissions,
object_id=object_id,
version_id=version_id,
link_version_specific_rdf=True,
restore_form=form,
get_user=get_user,
user_may_grant=user_may_grant,
get_action_type=get_action_type,
get_action_type_with_translation_in_language=get_action_type_with_translation_in_language,
)
@frontend.route('/objects/<int:object_id>/versions/<int:version_id>/restore', methods=['GET', 'POST'])
@object_permissions_required(Permissions.WRITE)
def restore_object_version(object_id, version_id):
if version_id < 0 or object_id < 0:
return flask.abort(404)
try:
current_object = get_object(object_id=object_id)
except ObjectDoesNotExistError:
return flask.abort(404)
if current_object.version_id <= version_id:
return flask.abort(404)
form = ObjectVersionRestoreForm()
if form.validate_on_submit():
logic.objects.restore_object_version(object_id=object_id, version_id=version_id, user_id=flask_login.current_user.id)
return flask.redirect(flask.url_for('.object', object_id=object_id))
return flask.render_template('objects/restore_object_version.html', object_id=object_id, version_id=version_id, restore_form=form)
@frontend.route('/objects/<int:object_id>/permissions')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object_permissions(object_id):
check_current_user_is_not_readonly()
object = get_object(object_id)
action = get_action(object.action_id)
instrument = action.instrument
user_permissions = get_object_permissions_for_users(object_id=object_id, include_instrument_responsible_users=False, include_groups=False, include_projects=False, include_readonly=False, include_admin_permissions=False)
group_permissions = get_object_permissions_for_groups(object_id=object_id, include_projects=False)
project_permissions = get_object_permissions_for_projects(object_id=object_id)
public_permissions = Permissions.READ if object_is_public(object_id) else Permissions.NONE
suggested_user_id = flask.request.args.get('add_user_id', '')
try:
suggested_user_id = int(suggested_user_id)
except ValueError:
suggested_user_id = None
if Permissions.GRANT in get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id):
user_permission_form_data = []
for user_id, permissions in user_permissions.items():
if user_id is None:
continue
user_permission_form_data.append({'user_id': user_id, 'permissions': permissions.name.lower()})
group_permission_form_data = []
for group_id, permissions in group_permissions.items():
if group_id is None:
continue
group_permission_form_data.append({'group_id': group_id, 'permissions': permissions.name.lower()})
project_permission_form_data = []
for project_id, permissions in project_permissions.items():
if project_id is None:
continue
project_permission_form_data.append({'project_id': project_id, 'permissions': permissions.name.lower()})
edit_user_permissions_form = ObjectPermissionsForm(public_permissions=public_permissions.name.lower(), user_permissions=user_permission_form_data, group_permissions=group_permission_form_data, project_permissions=project_permission_form_data)
users = get_users(exclude_hidden=True)
users = [user for user in users if user.id not in user_permissions]
add_user_permissions_form = ObjectUserPermissionsForm()
groups = get_user_groups(flask_login.current_user.id)
groups = [group for group in groups if group.id not in group_permissions]
add_group_permissions_form = ObjectGroupPermissionsForm()
projects = get_user_projects(flask_login.current_user.id, include_groups=True)
projects = [project for project in projects if project.id not in project_permissions]
add_project_permissions_form = ObjectProjectPermissionsForm()
copy_permissions_form = CopyPermissionsForm()
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
existing_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.GRANT
)
copy_permissions_form.object_id.choices = [
(str(existing_object.id), existing_object.data['name']['text'])
for existing_object in existing_objects
if existing_object.id != object_id
]
if len(copy_permissions_form.object_id.choices) == 0:
copy_permissions_form = None
else:
copy_permissions_form.object_id.choices = []
else:
edit_user_permissions_form = None
add_user_permissions_form = None
add_group_permissions_form = None
add_project_permissions_form = None
copy_permissions_form = None
users = []
groups = []
projects = []
acceptable_project_ids = {
project.id
for project in projects
}
all_projects = logic.projects.get_projects()
all_projects_by_id = {
project.id: project
for project in all_projects
}
if not flask.current_app.config['DISABLE_SUBPROJECTS']:
project_id_hierarchy_list = logic.projects.get_project_id_hierarchy_list(list(all_projects_by_id))
project_id_hierarchy_list = [
(level, project_id, project_id in acceptable_project_ids)
for level, project_id in project_id_hierarchy_list
]
else:
project_id_hierarchy_list = [
(0, project.id, project.id in acceptable_project_ids)
for project in sorted(all_projects, key=lambda project: project.id)
]
return flask.render_template(
'objects/object_permissions.html',
instrument=instrument,
action=action,
object=object,
user_permissions=user_permissions,
group_permissions=group_permissions,
project_permissions=project_permissions,
public_permissions=public_permissions,
get_user=get_user,
Permissions=Permissions,
form=edit_user_permissions_form,
users=users,
groups=groups,
projects_by_id=all_projects_by_id,
project_id_hierarchy_list=project_id_hierarchy_list,
show_projects_form=len(acceptable_project_ids) > 0,
add_user_permissions_form=add_user_permissions_form,
add_group_permissions_form=add_group_permissions_form,
get_group=get_group,
add_project_permissions_form=add_project_permissions_form,
copy_permissions_form=copy_permissions_form,
get_project=get_project,
suggested_user_id=suggested_user_id
)
@frontend.route('/objects/<int:object_id>/permissions', methods=['POST'])
@object_permissions_required(Permissions.GRANT)
def update_object_permissions(object_id):
edit_user_permissions_form = ObjectPermissionsForm()
add_user_permissions_form = ObjectUserPermissionsForm()
add_group_permissions_form = ObjectGroupPermissionsForm()
add_project_permissions_form = ObjectProjectPermissionsForm()
copy_permissions_form = CopyPermissionsForm()
if 'copy_permissions' in flask.request.form:
if not flask.current_app.config["LOAD_OBJECTS_IN_BACKGROUND"]:
existing_objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.GRANT
)
copy_permissions_form.object_id.choices = [
(str(existing_object.id), existing_object.data['name']['text'])
for existing_object in existing_objects
if existing_object.id != object_id
]
else:
copy_permissions_form.object_id.choices = []
if copy_permissions_form.validate_on_submit():
logic.object_permissions.copy_permissions(object_id, int(copy_permissions_form.object_id.data))
logic.object_permissions.set_user_object_permissions(object_id, flask_login.current_user.id, Permissions.GRANT)
flask.flash(_("Successfully copied object permissions."), 'success')
elif 'edit_user_permissions' in flask.request.form and edit_user_permissions_form.validate_on_submit():
set_object_public(object_id, edit_user_permissions_form.public_permissions.data == 'read')
for user_permissions_data in edit_user_permissions_form.user_permissions.data:
user_id = user_permissions_data['user_id']
try:
get_user(user_id)
except UserDoesNotExistError:
continue
permissions = Permissions.from_name(user_permissions_data['permissions'])
set_user_object_permissions(object_id=object_id, user_id=user_id, permissions=permissions)
for group_permissions_data in edit_user_permissions_form.group_permissions.data:
group_id = group_permissions_data['group_id']
try:
get_group(group_id)
except GroupDoesNotExistError:
continue
permissions = Permissions.from_name(group_permissions_data['permissions'])
set_group_object_permissions(object_id=object_id, group_id=group_id, permissions=permissions)
for project_permissions_data in edit_user_permissions_form.project_permissions.data:
project_id = project_permissions_data['project_id']
try:
get_project(project_id)
except ProjectDoesNotExistError:
continue
permissions = Permissions.from_name(project_permissions_data['permissions'])
set_project_object_permissions(object_id=object_id, project_id=project_id, permissions=permissions)
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
flask.flash(_("Successfully updated object permissions."), 'success')
elif 'add_user_permissions' in flask.request.form and add_user_permissions_form.validate_on_submit():
user_id = add_user_permissions_form.user_id.data
permissions = Permissions.from_name(add_user_permissions_form.permissions.data)
object_permissions = get_object_permissions_for_users(object_id=object_id, include_instrument_responsible_users=False, include_groups=False, include_projects=False, include_admin_permissions=False)
assert permissions in [Permissions.READ, Permissions.WRITE, Permissions.GRANT]
assert user_id not in object_permissions
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
set_user_object_permissions(object_id=object_id, user_id=user_id, permissions=permissions)
flask.flash(_("Successfully updated object permissions."), 'success')
elif 'add_group_permissions' in flask.request.form and add_group_permissions_form.validate_on_submit():
group_id = add_group_permissions_form.group_id.data
permissions = Permissions.from_name(add_group_permissions_form.permissions.data)
object_permissions = get_object_permissions_for_groups(object_id=object_id)
assert permissions in [Permissions.READ, Permissions.WRITE, Permissions.GRANT]
assert group_id not in object_permissions
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
set_group_object_permissions(object_id=object_id, group_id=group_id, permissions=permissions)
flask.flash(_("Successfully updated object permissions."), 'success')
elif 'add_project_permissions' in flask.request.form and add_project_permissions_form.validate_on_submit():
project_id = add_project_permissions_form.project_id.data
permissions = Permissions.from_name(add_project_permissions_form.permissions.data)
object_permissions = get_object_permissions_for_projects(object_id=object_id)
assert permissions in [Permissions.READ, Permissions.WRITE, Permissions.GRANT]
assert project_id not in object_permissions
user_log.edit_object_permissions(user_id=flask_login.current_user.id, object_id=object_id)
set_project_object_permissions(object_id=object_id, project_id=project_id, permissions=permissions)
flask.flash(_("Successfully updated object permissions."), 'success')
else:
flask.flash(_("A problem occurred while changing the object permissions. Please try again."), 'error')
return flask.redirect(flask.url_for('.object_permissions', object_id=object_id))
| 47.410323 | 405 | 0.664943 | ["MIT"] | sciapp/sampledb | sampledb/frontend/objects.py | 110,241 | Python |
from decimal import Decimal, ROUND_DOWN
from time import time
def elapsed(t0=0.0):
"""get elapsed time from the give time
Returns:
now: the absolute time now
dt_str: elapsed time in string
"""
now = time()
dt = now - t0
dt_sec = Decimal(str(dt)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
if dt_sec <= 1:
dt_str = str(dt_sec) + ' second'
else:
dt_str = str(dt_sec) + ' seconds'
return now, dt_str
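# Minimal usage sketch (illustrative, not part of the original module):
#   t0, _ = elapsed()            # returns the absolute time now; use it as the reference point
#   ...                          # do some work
#   _, dt_str = elapsed(t0)      # dt_str is e.g. "0.0123 seconds"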
| 23.85 | 77 | 0.603774 | ["Apache-2.0"] | mhdella/andes | andes/utils/time.py | 477 | Python |
_base_ = './fcn_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
| 43.666667 | 79 | 0.778626 | ["Apache-2.0"] | 1171000410/mmsegmentation | configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py | 131 | Python |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
import pytest
env = Environment()
# time format for Apr 5 22:51:54 2021
# <187>{{ arubadate }} {{ host }} authmgr[4130]: <124198> <4130> <ERRS> <{{ host }} 10.10.10.10> {00:00:00:00:00:00-??} Missing server in attribute list, auth=VPN, utype=L3.
# <187>{{ arubadate }} {{ host }} stm[4133]: <399803> <4133> <ERRS> <{{ host }} 10.10.10.10> An internal system error has occurred at file sapm_ap_mgmt.c function sapm_get_img_build_version_str line 11853 error stat /mswitch/sap/mips64.ari failed: No such file or directory.
# <188>{{ arubadate }} {{ host }} wms[4096]: <126005> <4096> <WARN> <{{ host }} 10.10.10.10> |ids| Interfering AP: The system classified an access point (BSSID 00:0e:8e:96:f4:32 and SSID on CHANNEL 36) as interfering. Additional Info: Detector-AP-Name:00:0b:86:9e:6b:5f; Detector-AP-MAC:24:de:c6:70:2c:90; Detector-AP-Radio:1.
# <191>{{ arubadate }} 10.10.10.10 dnsmasq: reading /etc/resolv.conf
testdata = [
"<187>{{ arubadate }} {{ host }} authmgr[4130]: <124198> <4130> <ERRS> <{{ host }} 10.10.10.10> {00:00:00:00:00:00-??} Missing server in attribute list, auth=VPN, utype=L3.",
"<187>{{ arubadate }} {{ host }} stm[4133]: <399803> <4133> <ERRS> <{{ host }} 10.10.10.10> An internal system error has occurred at file sapm_ap_mgmt.c function sapm_get_img_build_version_str line 11853 error stat /mswitch/sap/mips64.ari failed: No such file or directory.",
"<188>{{ arubadate }} {{ host }} wms[4096]: <126005> <4096> <WARN> <{{ host }} 10.10.10.10> |ids| Interfering AP: The system classified an access point (BSSID 00:0e:8e:96:f4:32 and SSID on CHANNEL 36) as interfering. Additional Info: Detector-AP-Name:00:0b:86:9e:6b:5f; Detector-AP-MAC:24:de:c6:70:2c:90; Detector-AP-Radio:1.",
"<188>{{ arubadate }} {{ host }} sapd[1362]: <127037> <WARN> |AP 00:0b:86:eb:4e:[email protected] sapd| |ids-ap| AP(04:bd:88:8a:3a:60): Station Associated to Rogue AP: An AP detected a client a4:8d:3b:ae:68:68 associated to a rogue access point (BSSID 98:1e:19:31:63:b6 and SSID MySpectrumWiFib0-2G on CHANNEL 11).",
]
@pytest.mark.parametrize("event", testdata)
def test_aruba(
record_property, setup_wordlist, get_host_key, setup_splunk, setup_sc4s, event
):
host = get_host_key
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
arubadate = dt.strftime("%b %d %H:%M:%S %Y")
# Tune time functions
epoch = epoch[:-7]
mt = env.from_string(event + "\n")
message = mt.render(mark="<188>", bsd=bsd, host=host, arubadate=arubadate)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search index=netops _time={{ epoch }} sourcetype="aruba:syslog" host={{ host }}'
)
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
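# Illustrative run (assumed): `pytest tests/test_aruba.py`, with the setup_splunk / setup_sc4s
# fixtures provided elsewhere in the suite; each parametrized event must yield exactly one result.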
| 54.245902 | 332 | 0.686008 | ["BSD-2-Clause", "CC0-1.0"] | iainrose/splunk-connect-for-syslog | tests/test_aruba.py | 3,309 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hackcrisis.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
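# Typical invocations (illustrative): `python manage.py migrate` or `python manage.py runserver`,
# with DJANGO_SETTINGS_MODULE defaulting to 'hackcrisis.settings' as set above.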
| 28.636364 | 74 | 0.684127 | ["MIT"] | jakubzadrozny/hackcrisis | manage.py | 630 | Python |
# This script fetches an Atelier 801 translation file and adds the required IDs to our own translation files
import sys
from urllib.request import urlopen
import zlib
from string import Template
import json
if len(sys.argv) < 2:
print("Please pass in lang code for first arguement")
exit()
lang = sys.argv[1]
url = 'https://www.transformice.com/langues/tfm-'+lang+'.gz'
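# Example invocation (illustrative): `python _tfm_trans_to_skilldata.py en` fetches
# https://www.transformice.com/langues/tfm-en.gz and writes the selected keys to en.json.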
# Fetch file
response = urlopen(url)
filedata = response.read()
filedata = zlib.decompress(filedata)
filedata = bytes.decode(filedata)
# Parse file
filedata = filedata.split("\n-\n")
i18n = {}
for data in filedata:
if(not data): continue
key,val = data.split("=", 1)
i18n[key] = val
# Use data to do the actual thing this tool is for
def desc(key, arg1=None):
if(arg1 != None):
return i18n[key].replace("%1", arg1)
return i18n[key]
transKeys = [
"C_GuideSprirituel",
"C_MaitresseDuVent",
"C_Mecanicienne",
"C_Sauvageonne",
"C_Physicienne",
"C_14", "C_14_T",
"C_11", "C_11_T",
"C_12", "C_12_T",
"C_13", "C_13_T",
"C_8", "C_8_T",
"C_9", "C_9_T",
"C_10", "C_10_T",
"C_5", "C_5_T",
"C_6", "C_6_T",
"C_7", "C_7_T",
"C_2", "C_2_T",
"C_3", "C_3_T",
"C_4", "C_4_T",
"C_0", "C_0_T",
"C_1", "C_1_T",
"C_34", "C_34_T",
"C_31", "C_31_T",
"C_32", "C_32_T",
"C_33", "C_33_T",
"C_28", "C_28_T",
"C_29", "C_29_T",
"C_30", "C_30_T",
"C_25", "C_25_T",
"C_26", "C_26_T",
"C_27", "C_27_T",
"C_22", "C_22_T",
"C_23", "C_23_T",
"C_24", "C_24_T",
"C_20", "C_20_T",
"C_21", "C_21_T",
"C_54", "C_54_T",
"C_51", "C_51_T",
"C_52", "C_52_T",
"C_53", "C_53_T",
"C_48", "C_48_T",
"C_49", "C_49_T",
"C_50", "C_50_T",
"C_45", "C_45_T",
"C_46", "C_46_T",
"C_47", "C_47_T",
"C_42", "C_42_T",
"C_43", "C_43_T",
"C_44", "C_44_T",
"C_40", "C_40_T",
"C_41", "C_41_T",
"C_94", "C_94_T",
"C_80", "C_80_T",
"C_93", "C_93_T",
"C_70", "C_70_T",
"C_72", "C_72_T",
"C_81", "C_81_T",
"C_92", "C_92_T",
"C_66", "C_66_T",
"C_71", "C_71_T",
"C_73", "C_73_T",
"C_68", "C_68_T",
"C_88", "C_88_T",
"C_84", "C_84_T",
"C_86", "C_86_T",
"C_89", "C_89_T",
"C_91", "C_91_T",
"C_83", "C_83_T",
"C_85", "C_85_T",
"C_90", "C_90_T",
"C_63", "C_63_T",
"C_74", "C_74_T",
"C_87", "C_87_T",
"C_82", "C_82_T",
"C_60", "C_60_T",
"C_64", "C_64_T",
"C_65", "C_65_T",
"C_69", "C_69_T",
"C_67", "C_67_T",
"C_61", "C_61_T",
"C_62", "C_62_T",
]
i18nToWrite = {}
for key in transKeys:
i18nToWrite[key] = i18n[key]
with open(lang+'.json', 'w') as outfile:
# outfile.write(i18nToWrite)
    json.dump(i18nToWrite, outfile, indent=4)
| 20.015873 | 107 | 0.607851 | ["MIT"] | fewfre/TransformiceSkillTreeBuilder | i18n/_tfm_trans_to_skilldata.py | 2,522 | Python |
#!/usr/bin/env python
__author__ = ('Duy Tin Truong ([email protected]), '
'Aitor Blanco Miguez ([email protected])')
__version__ = '3.0'
__date__ = '21 Feb 2020'
import argparse as ap
import dendropy
from io import StringIO
import re
from collections import defaultdict
import matplotlib.colors as colors
import subprocess
def read_params():
p = ap.ArgumentParser()
p.add_argument('-t', '--ifn_tree',
required=True,
default=None,
type=str,
help='The input tree in newick format.')
p.add_argument('-m', '--colorized_metadata',
required=False,
default='unset',
type=str,
help='The metadata field to colorize. Default "unset".')
p.add_argument('--fig_size',
required=False,
default=8,
type=float,
help='The figure size. Default "8".')
p.add_argument('--legend_marker_size',
required=False,
default=20,
type=int,
help='The legend marker size. Default "20".'
)
p.add_argument('--legend_font_size',
required=False,
default=10,
type=int,
help='The legend font size. Default "10".'
)
p.add_argument('--legend_marker_edge_width',
required=False,
default=0.2,
type=float,
help='The legend marker edge width. Default "0.2".'
)
p.add_argument('--leaf_marker_size',
required=False,
default=20,
type=int,
help='The legend marker size. Default "20".'
)
p.add_argument('--leaf_marker_edge_width',
required=False,
default=0.2,
type=float,
help='The legend marker edge width. Default "0.2".'
)
p.add_argument('--dpi',
required=False,
default=300,
type=int,
help='The figure dpi.')
p.add_argument('--figure_extension',
required=False,
default='.png',
type=str,
help='The figure extension. Default ".png".')
p.add_argument('--ofn_prefix',
required=False,
default=None,
type=str,
help='The prefix of output files.')
return p.parse_args()
def run(cmd):
print (cmd)
subprocess.call(cmd.split())
def main():
args = read_params()
tree = dendropy.Tree.get_from_path(args.ifn_tree, schema='newick',
preserve_underscores=True)
tree.reroot_at_midpoint()
count = 0
metadatas = set([])
node2metadata = {}
for node in tree.preorder_node_iter():
nodestr = node.__getattribute__("taxon").__str__().strip("'")
if node.is_leaf():
if '.' in nodestr:
nodestr = nodestr.replace('.',',')
node.taxon = dendropy.Taxon(label=nodestr)
substrs = re.findall(
'%s-[a-zA-Z0-9.]*'%args.colorized_metadata,
nodestr)
if substrs:
md = substrs[0].replace(args.colorized_metadata + '-', '')
metadatas.add(md)
node2metadata[nodestr] = md
else:
count += 1
node.taxon = dendropy.Taxon(label='node_%d'%count)
metadatas = sorted(list(metadatas))
color_names = list(colors.cnames.keys())
metadata2color = {}
for i, md in enumerate(metadatas):
metadata2color[md] = color_names[i % len(color_names)]
if not args.ofn_prefix:
args.ofn_prefix = args.ifn_tree
ofn_tree = args.ofn_prefix + '.graphlantree'
tree.write_to_path(ofn_tree, 'newick')
ofn_annot = args.ofn_prefix + '.annot'
with open(ofn_annot, 'w') as ofile:
#ofile.write('clade_separation\t0\n')
ofile.write('branch_bracket_width\t0\n')
#ofile.write('clade_separation\t0.15\n')
ofile.write('branch_bracket_depth\t0\n')
#ofile.write('branch_thickness\t1.25\n')
ofile.write('annotation_background_width\t0\n')
# legend
ofile.write('#legends\n')
ofile.write('class_legend_font_size\t%d\n'%args.legend_font_size)
for md in metadata2color:
ofile.write('%s\tclade_marker_size\t%d\n'%(md, args.legend_marker_size))
ofile.write('%s\tclade_marker_color\t%s\n'%(md, metadata2color[md]))
ofile.write('%s\tclade_marker_edge_width\t%f\n'%(md, args.legend_marker_edge_width))
# remove intermedate nodes
for node in tree.preorder_node_iter():
if not node.is_leaf():
nodestr = node.__getattribute__("taxon").__str__().strip("'")
ofile.write('%s\tclade_marker_size\t0\n'%(nodestr))
# colorize leaf nodes
for node in tree.seed_node.leaf_nodes():
nodestr = node.__getattribute__("taxon").__str__().strip("'")
if nodestr in node2metadata:
leaf_color = metadata2color[node2metadata[nodestr]]
ofile.write('%s\tclade_marker_size\t%d\n'%(nodestr, args.leaf_marker_size))
ofile.write('%s\tclade_marker_color\t%s\n'%(nodestr, leaf_color))
ofile.write('%s\tclade_marker_edge_width\t%f\n'%(nodestr, args.leaf_marker_edge_width))
ofn_xml = args.ofn_prefix + '.xml'
cmd = 'graphlan_annotate.py --annot %s %s %s'%(ofn_annot, ofn_tree, ofn_xml)
run(cmd)
ofn_fig = args.ofn_prefix + args.figure_extension
cmd = 'graphlan.py %s %s --dpi %d --size %f'%(ofn_xml, ofn_fig, args.dpi, args.fig_size)
run(cmd)
print ('Output file: %s'%ofn_fig)
if __name__ == '__main__':
main()
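# Example invocation (illustrative; the tree file name is an assumption, and graphlan_annotate.py /
# graphlan.py must be available on PATH for run() to succeed):
#   python plot_tree_graphlan.py -t RAxML_bestTree.s__species.tre -m subjectID --ofn_prefix out_tree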
| 37.527607 | 103 | 0.542586 | ["MIT"] | Adrian-Howard/MetaPhlAn | metaphlan/utils/plot_tree_graphlan.py | 6,117 | Python |
from django.db import models
from django.urls import reverse
import uuid # Required for unique book instances
class Genre(models.Model):
"""
Model representing a book genre (e.g. Science Fiction, Non Fiction).
"""
name = models.CharField(max_length=200,
help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Language(models.Model):
"""
Model representing a Language (e.g. Russian, English etc.)
"""
name = models.CharField(max_length=200,
help_text="Enter a book language (e.g. Russian, English etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Book(models.Model):
"""
Model representing a book (but not a specific copy of a book).
"""
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in the file.
summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
isbn = models.CharField('ISBN',max_length=13,
help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
# ManyToManyField used because genre can contain many books. Books can cover many genres.
# Genre class has already been defined so we can specify the object above.
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
def get_absolute_url(self):
"""
Returns the url to access a particular book instance.
"""
return reverse('book-detail', args=[str(self.id)])
class BookInstance(models.Model):
"""
Model representing a specific copy of a book (i.e. that can be borrowed from the library).
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book availability')
class Meta:
ordering = ["due_back"]
def __str__(self):
"""
String for representing the Model object
"""
return '{0} ({1})'.format(self.id, self.book.title)
class Author(models.Model):
"""
Model representing an author.
"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('Died', null=True, blank=True)
def get_absolute_url(self):
"""
Returns the url to access a particular author instance.
"""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return '{0} ({1})'.format(self.last_name, self.first_name)
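# Illustrative sketch (assumed, not part of the tutorial file): creating related records with the
# models defined above.
#   author = Author.objects.create(first_name='Jane', last_name='Doe')
#   book = Book.objects.create(title='Example Book', author=author,
#                              summary='A short summary.', isbn='9780000000000')
#   copy = BookInstance.objects.create(book=book, imprint='First edition', status='a')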
| 32.084746 | 132 | 0.651083 | ["MIT"] | zhekazuev/mozilla-django-learning | src/locallibrary/catalog/models.py | 3,786 | Python |
# --------------
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn releases
# code starts here
df = pd.read_csv(path)
df.head()
X = df[['ages','num_reviews','piece_count','play_star_rating','review_difficulty','star_rating','theme_name','val_star_rating','country']]
y = df['list_price']
X_train,X_test,y_train,y_test = train_test_split(X,y,random_state = 6, test_size = 0.3)
# code ends here
# --------------
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
# code starts here
cols = X_train.columns
#cols= list(X_train.columns.values)
sns.pairplot(df)
# code ends here
# --------------
# Code starts here
corr = X_train.corr()
print(corr)
X_train.drop(['play_star_rating', 'val_star_rating'], axis = 1,inplace = True)
X_test.drop(['play_star_rating', 'val_star_rating'], axis = 1,inplace = True)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import math
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
def metrics(actual,pred):
print('Mean Squared Error', mean_squared_error(actual,pred))
print('R-Squared', r2_score(actual,pred))
metrics(y_test,y_pred)
mse = 2106.7634311857673
r2 = 0.7747160273433752
# Code ends here
# --------------
# Code starts here
residual = y_test - y_pred
plt.hist(residual)
# Code ends here
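# Illustrative follow-up check (assumed): roughly zero-centred residuals suggest an unbiased fit.
# print(residual.describe())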
| 21.704225 | 138 | 0.720311 | ["MIT"] | iamacityzen/ga-learner-dsmp-repo | Making-first-prediction-using-linear-regression/code.py | 1,541 | Python |
from dgl.data import CoraGraphDataset, CiteseerGraphDataset, PubmedGraphDataset
def load(name):
if name == 'cora':
dataset = CoraGraphDataset()
elif name == 'citeseer':
dataset = CiteseerGraphDataset()
elif name == 'pubmed':
        dataset = PubmedGraphDataset()
    else:
        raise ValueError(f"Unknown dataset name: {name!r}")
graph = dataset[0]
train_mask = graph.ndata.pop('train_mask')
test_mask = graph.ndata.pop('test_mask')
feat = graph.ndata.pop('feat')
labels = graph.ndata.pop('label')
    return graph, feat, labels, train_mask, test_mask
| 28.315789 | 79 | 0.667286 | ["Apache-2.0"] | 905355494/dgl | examples/pytorch/grace/dataset.py | 538 | Python |
from enum import IntEnum
from typing import Dict, Union, Callable, Any
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events = []
self.static_events = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self):
return self.events
def __len__(self):
return len(self.events)
def add(self, event_name, static=False):
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self):
self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type):
for e in self.events:
if event_type in EVENTS.get(e, {}).keys():
return True
return False
def create_alerts(self, event_types, callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}).keys():
setattr(event, event_type, True)
ret.append(event)
return ret
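# Illustrative sketch (not part of the original file): how the Events container above is typically
# exercised; the specific event name is an arbitrary choice for the example.
#   events = Events()
#   events.add(EventName.startup)
#   if events.any(ET.PERMANENT):
#     alerts = events.create_alerts([ET.PERMANENT])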
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
alert_priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration_sound: float,
duration_hud_alert: float,
duration_text: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.alert_priority = alert_priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration_sound = duration_sound
self.duration_hud_alert = duration_hud_alert
self.duration_text = duration_text
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.start_time = 0.
self.alert_type = ""
self.event_type = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.alert_priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
return self.alert_priority > alert2.alert_priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2, audible_alert=AudibleAlert.chimeError,
visual_alert=VisualAlert.none, duration_hud_alert=2.):
super().__init__("오픈파일럿 사용불가", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
audible_alert, .4, duration_hud_alert, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2):
super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
AlertStatus.userPrompt, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.chimeError, .1, 2., 2.),
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2, alert_text_1="핸들을 즉시 잡아주세요"):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, 2.2, 3., 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert=True):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2, 0., 0.),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str, duration_text: float = 0.2):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., duration_text),
# ********** alert callback functions **********
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(round(CP.minSteerSpeed * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = "km/h" if metric else "mph"
return Alert(
"핸들을 잡아주세요",
"%d %s 이상의 속도에서 자동조향됩니다" % (speed, unit),
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, 0., 0.4, .3)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(MIN_SPEED_FILTER * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
unit = "km/h" if metric else "mph"
return Alert(
"캘리브레이션 진행중입니다 : %d%%" % sm['liveCalibration'].calPerc,
"속도를 %d %s 이상으로 주행해주세요" % (speed, unit),
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
gps_integrated = sm['pandaState'].pandaType in [log.PandaState.PandaType.uno, log.PandaState.PandaType.dos]
return Alert(
"GPS 수신불량",
"GPS 연결상태 및 안테나를 점검하세요" if gps_integrated else "GPS 안테나를 점검하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
text = "크루즈 비활성상태"
if CP.carName == "honda":
text = "메인 스위치 OFF"
return NoEntryAlert(text, duration_hud_alert=0.)
def startup_fuzzy_fingerprint_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
return Alert(
"WARNING: No Exact Match on Car Model",
f"Closest Match: {CP.carFingerprint.title()[:40]}",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
alc_timer = sm['lateralPlan'].autoLaneChangeTimer
return Alert(
"자동차선변경이 %d초 뒤에 시작됩니다" % alc_timer,
"차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.steerRequired, AudibleAlert.none, 0., .1, .1, alert_rate=0.75)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
axes = sm['testJoystick'].axes
gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
return Alert(
"Joystick Mode",
f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .1)
EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, bool], Alert]]]] = {
# ********** events with no alerts **********
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: Alert(
"조이스틱 모드",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 0.1),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("Controls Initializing"),
},
EventName.startup: {
ET.PERMANENT: Alert(
"오픈파일럿 사용준비 완료",
"항상 핸들을 잡고 도로를 주시하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 3.),
},
EventName.startupMaster: {
ET.PERMANENT: Alert(
"오픈파일럿 사용준비 완료",
"항상 핸들을 잡고 도로를 주시하세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 3.),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: Alert(
"대시캠 모드",
"항상 핸들을 잡고 도로를 주시하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: Alert(
"대시캠 모드 : 호환되지않는 차량",
"항상 핸들을 잡고 도로를 주시하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
# openpilot uses the version strings from various ECUs to detect the correct car model.
# Usually all ECUs are recognized and an exact match to a car model can be made. Sometimes
# one or two ECUs have unrecognized versions, but the others are present in the database.
# If openpilot is confident about the match to a car model, it fingerprints anyway.
# In this case an alert is thrown since there is a small chance the wrong car was detected
# and the user should pay extra attention.
# This alert can be prevented by adding all ECU firmware versions to openpilot:
# https://github.com/commaai/openpilot/wiki/Fingerprinting
EventName.startupFuzzyFingerprint: {
ET.PERMANENT: startup_fuzzy_fingerprint_alert,
},
EventName.startupNoFw: {
ET.PERMANENT: Alert(
"차량 인식 불가",
"모든 연결을 확인해 보세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.dashcamMode: {
ET.PERMANENT: Alert(
"대시캠 모드",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: Alert(
"차량 LKAS 버튼 상태확인",
"차량 LKAS 버튼 OFF후 활성화됩니다",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
# LOW priority to overcome Cruise Error
ET.PERMANENT: Alert(
"커뮤니티 기능 감지됨",
"개발자설정에서 커뮤니티 기능을 활성화해주세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: Alert(
"대시캠 모드",
"차량인식 불가 - 핑거프린트를 확인하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.stockFcw: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
ET.NO_ENTRY: NoEntryAlert("Stock FCW: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.chimeWarningRepeat, 1., 2., 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"핸들을 잡아주세요",
"차선이탈 감지됨",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.ldw, AudibleAlert.chimePrompt, 1., 2., 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"가속패달감지시 오픈파일럿은 브레이크를 사용하지않습니다",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non-zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: SoftDisableAlert("Vehicle Parameter Identification Failed"),
ET.WARNING: Alert(
"차량 매개변수 식별 오류",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.steerRequired, AudibleAlert.none, .0, .0, .1),
},
EventName.steerTempUnavailableUserOverride: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 일시적으로 사용불가",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 1., 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요 : 운전자 도로주시 불안",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요",
"운전자 도로주시 불안",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 도로주시 불안",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요 : 운전자 인식 불가",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"운전자 응답하지않음",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 응답하지않음",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"수동으로 재활성화하세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"앞차량 멈춤",
"앞차가 출발하면 자동 재출발",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"차선을 변경합니다",
"좌측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"차선을 변경합니다",
"우측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"후측방 차량감지",
"차선에 차량이 감지되니 대기하세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, .1, .1, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"차선을 변경합니다",
"후측방 차량에 주의하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 제한을 초과함",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 1., 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("FAN 오작동", "하드웨어를 점검하세요"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("", ""),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Localizer unstable", "Contact Support"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("브레이크 감지됨"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("주차 브레이크를 해제하세요"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("브레이크 감지됨",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("어뎁티브크루즈를 활성화하세요"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: SoftDisableAlert("조향제어 일시적으로 사용불가"),
ET.NO_ENTRY: NoEntryAlert("조향제어 일시적으로 사용불가",
duration_hud_alert=0.),
},
EventName.outOfSpace: {
ET.PERMANENT: Alert(
"저장공간 부족",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("저장공간 부족",
duration_hud_alert=0.),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: NoEntryAlert("속도를 높여주세요"),
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"장치 센서 오류",
"장치 점검후 재가동세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("장치 센서 오류"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("스피커가 감지되지않습니다", "이온을 재부팅 해주세요"),
ET.NO_ENTRY: NoEntryAlert("스피커가 감지되지않습니다"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("방해 수준이 너무높음"),
},
EventName.overheat: {
ET.PERMANENT: Alert(
"장치 과열됨",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.SOFT_DISABLE: SoftDisableAlert("장치 과열됨"),
ET.NO_ENTRY: NoEntryAlert("장치 과열됨"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: SoftDisableAlert("기어를 [D]로 변경하세요"),
ET.NO_ENTRY: NoEntryAlert("기어를 [D]로 변경하세요"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and re-attaching it while making sure the device is pointed straight forward and level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("캘리브레이션 오류", "장치 위치변경후 캘리브레이션을 다시하세요"),
ET.SOFT_DISABLE: SoftDisableAlert("캘리브레이션 오류 : 장치 위치변경후 캘리브레이션을 다시하세요"),
ET.NO_ENTRY: NoEntryAlert("캘리브레이션 오류 : 장치 위치변경후 캘리브레이션을 다시하세요"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: SoftDisableAlert("캘리브레이션 진행중입니다"),
ET.NO_ENTRY: NoEntryAlert("캘리브레이션 진행중입니다"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: SoftDisableAlert("도어 열림"),
ET.NO_ENTRY: NoEntryAlert("도어 열림"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: SoftDisableAlert("안전벨트를 착용해주세요"),
ET.NO_ENTRY: NoEntryAlert("안전벨트를 착용해주세요"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: SoftDisableAlert("ESP 꺼짐"),
ET.NO_ENTRY: NoEntryAlert("ESP 꺼짐"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: SoftDisableAlert("배터리 부족"),
ET.NO_ENTRY: NoEntryAlert("배터리 부족"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("장치 프로세스 통신오류"),
ET.NO_ENTRY: NoEntryAlert("장치 프로세스 통신오류",
audible_alert=AudibleAlert.chimeDisengage),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("시스템 오작동: 이온을 재부팅 하세요",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarFault: {
ET.SOFT_DISABLE: SoftDisableAlert("레이더 오류 : 차량을 재가동하세요"),
ET.NO_ENTRY : NoEntryAlert("레이더 오류 : 차량을 재가동하세요"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: SoftDisableAlert("주행모델 지연됨"),
ET.NO_ENTRY : NoEntryAlert("주행모델 지연됨"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: SoftDisableAlert("차선인식상태가 좋지않으니 주의운전하세요"),
ET.NO_ENTRY: NoEntryAlert("차선인식상태가 좋지않으니 주의운전하세요"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver that the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: SoftDisableAlert("장치가 마운트에서 떨어짐"),
ET.NO_ENTRY: NoEntryAlert("장치가 마운트에서 떨어짐"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: SoftDisableAlert("메모리 부족 : 장치를 재가동하세요"),
ET.PERMANENT: NormalPermanentAlert("메모리 부족", "장치를 재가동하세요"),
ET.NO_ENTRY : NoEntryAlert("메모리 부족 : 장치를 재가동하세요",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("크루즈 오류"),
ET.PERMANENT: NormalPermanentAlert("크루즈 오류", ""),
ET.NO_ENTRY: NoEntryAlert("크루즈 오류"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("컨트롤 불일치"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Road Camera Error", "",
duration_text=10.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Driver Camera Error", "",
duration_text=10.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Wide Road Camera Error", "",
duration_text=10.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: SoftDisableAlert("USB 에러: 이온을 재부팅 하세요"),
ET.PERMANENT: NormalPermanentAlert("USB 에러: 이온을 재부팅 하세요", ""),
ET.NO_ENTRY: NoEntryAlert("USB 에러: 이온을 재부팅 하세요"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some messages are not received at the right frequency
# If you're not writing a new car port, this is usually caused by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN 오류 : 하드웨어를 점검하세요"),
ET.PERMANENT: Alert(
"CAN 오류 : 하드웨어를 점검하세요",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN 오류 : 하드웨어를 점검하세요"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS 오류 : 차량을 재가동하세요"),
ET.PERMANENT: Alert(
"LKAS 오류 : 차량을 재가동하세요",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("LKAS 오류 : 차량을 재가동하세요"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: Alert(
"크루즈 오류 : 차량을 재가동하세요",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("크루즈 오류 : 차량을 재가동하세요"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"기어 [R] 상태",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=0.5),
ET.SOFT_DISABLE: SoftDisableAlert("기어 [R] 상태"),
ET.NO_ENTRY: NoEntryAlert("기어 [R] 상태"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no longer control the car, so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("크루즈 꺼짐"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.SOFT_DISABLE: SoftDisableAlert("플래너 솔루션 오류"),
ET.NO_ENTRY: NoEntryAlert("플래너 솔루션 오류"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("하네스 오작동"),
ET.PERMANENT: NormalPermanentAlert("하네스 오작동", "하드웨어를 점검하세요"),
ET.NO_ENTRY: NoEntryAlert("하네스 오작동"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"오픈파일럿 사용불가",
"근접 앞차량이 없습니다",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"오픈파일럿 사용불가",
"속도를 높이고 재가동하세요",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable
EventName.speedTooHigh: {
ET.WARNING: Alert(
"속도가 너무 높습니다",
"속도를 줄여주세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, 2.2, 3., 4.),
ET.NO_ENTRY: Alert(
"속도가 너무 높습니다",
"속도를 줄이고 재가동하세요",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.chimeError, .4, 2., 3.),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: Alert(
"크루즈 오류 : 차량을 재가동하세요",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("크루즈 오류 : 차량을 재가동하세요"),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"방향지시등 동작중에는 핸들을 잡아주세요",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .0, .0, .2),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
EventName.slowingDownSpeed: {
ET.PERMANENT: Alert("속도를 조절합니다","", AlertStatus.normal, AlertSize.small,
Priority.MID, VisualAlert.none, AudibleAlert.none, 0., .1, .1),
},
EventName.slowingDownSpeedSound: {
ET.PERMANENT: Alert("속도를 조절합니다","", AlertStatus.normal, AlertSize.small,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeSlowingDownSpeed, 2., 2., 2.),
},
}
| 33.876368 | 116 | 0.674547 | ["MIT"] | agegold/neokii_KR-1 | selfdrive/controls/lib/events.py | 34,085 | Python |
import numpy as np
import h5py
import argparse
import imageio
import tqdm
import os
from glob import glob
def main(args):
"""Main function to parse in Nuclei Dataset from Kaggle and store as HDF5
Parameters
----------
args: ArgumentParser()
input_dir: str
directory of the Nuclei data
output_dir: str
path to the HDF5 output directory
"""
# create hdf5
hdf5_fn = h5py.File(os.path.join(args.output_dir, "data_360.hdf5"), "a")
# get all data directory
data_dirs = glob(os.path.join(args.input_dir, "*/"))
with tqdm.tqdm(total=len(data_dirs), unit="folder") as progress_bar:
for path in data_dirs:
data_name = path.split("/")[-2]
x, y, masks = parse_data(path)
# TODO only use majority size for now
if x is None:
progress_bar.update(1)
continue
# stack x and y together
y = np.expand_dims(y, axis=0)
data = np.vstack((x,y,masks))
            hdf5_fn.create_dataset(str(data_name), data=data, dtype=float, chunks=True)
progress_bar.update(1)
hdf5_fn.close()
def parse_data(path):
# define data folders
x_path = os.path.join(path, "images/")
y_path = os.path.join(path, "masks/")
# get all data paths
x_file = glob(os.path.join(x_path, "*.png"))[0]
y_files = glob(os.path.join(y_path, "*.png"))
# parse in data
x = imageio.imread(x_file)
# TODO only using majority shape
if x.shape != (256, 256, 4):
return None, None, None
masks = np.array([imageio.imread(y) for y in y_files])
y = np.zeros_like(masks[0])
for y_raw in masks:
y = np.maximum(y, y_raw)
# normalize
x = x / 255.0
y = y / 255.0
masks = masks / 255.0
    # fix dimensions
x = np.transpose(x, (2,0,1)) # channels first
return x, y, masks
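# Illustrative helper (an assumption, not used by main): split a stored sample
# back into its parts, mirroring the vstack layout above -- four RGBA image
# channels first, then the merged mask, then one channel per instance mask.
def example_read_sample(hdf5_path, data_name):
    with h5py.File(hdf5_path, "r") as f:
        data = np.array(f[str(data_name)])
    x = data[:4]       # image, channels first
    y = data[4]        # merged binary mask
    masks = data[5:]   # individual instance masks
    return x, y, masks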
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str)
parser.add_argument('--output_dir', type=str)
args = parser.parse_args()
main(args)
| 23.879121 | 90 | 0.588127 | ["Apache-2.0"] | marshuang80/CellSegmentation | process_data/nuclei_create_hdf5.py | 2,173 | Python |
#11_Duplicate in an array N+1 integer
"""
Given an array of n elements that contains elements from 0 to n-1, with any of these numbers appearing any number of times. Find these repeating numbers in O(n) and using only constant memory space.
Example:
Input : n = 7 and array[] = {1, 2, 3, 6, 3, 6, 1}
Output: 1, 3, 6
Explanation: The numbers 1, 3 and 6 appear more
than once in the array.
Input : n = 5 and array[] = {1, 2, 3, 4 ,3}
Output: 3
Explanation: The number 3 appears more than once
in the array.
"""
"""
Algorithm:
1. Traverse the array from start to end.
2. For every element, take its absolute value as an index.
   If the element at index abs(array[i]) is positive, this value has not been encountered before, so negate the element at that index.
   If it is already negative, the value has been encountered before, so print the absolute value of the current element.
Complexity Analysis:
Time Complexity: O(n), only one traversal is needed, so time complexity is O(n)
Auxiliary Space: O(1), no extra space is required, so space complexity is constant
"""
def printRepeating(arr, size):
print("The repeating elements are :")
    for i in range(0, size):
        if arr[abs(arr[i])] > 0:
            arr[abs(arr[i])] = -arr[abs(arr[i])]
else:
print(abs(arr[i]), end=" ")
arr = [1,2,3,1,3,6,6]
arr_size = len(arr)
printRepeating(arr, arr_size)
| 26.641509 | 199 | 0.640227 | ["MIT"] | iamParvezKhan25/Data-Structure-Algorithm | 11_Duplicate in an array N+1 integer.py | 1,414 | Python |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
_find_tensors)
from mmcv import print_log
from mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(DistributedDataParallel):
"""The DDP module that supports DataContainer.
MMDDP has two main differences with PyTorch DDP:
- It supports a custom type :class:`DataContainer` which allows more
flexible control of input data.
    - It implements two APIs ``train_step()`` and ``val_step()``.
"""
def to_kwargs(self, inputs, kwargs, device_id):
# Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
# to move all tensors to device_id
return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def train_step(self, *inputs, **kwargs):
"""train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
def val_step(self, *inputs, **kwargs):
"""val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
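# Illustrative sketch only (an assumption, not part of mmcv's API in this
# file): typical construction of the wrapper, assuming torch.distributed has
# already been initialized and ``module`` implements ``train_step()`` /
# ``val_step()`` as described in the class docstring. ``local_rank`` is the
# GPU index of the current process.
def _example_wrap_module(module, local_rank):
    return MMDistributedDataParallel(
        module.cuda(local_rank),
        device_ids=[local_rank],
        broadcast_buffers=False)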
| 42.568345 | 78 | 0.603008 | ["Apache-2.0"] | BIGWangYuDong/mmcv | mmcv/parallel/distributed.py | 5,917 | Python |
import datetime
import json
import logging
import re
import time
from google.appengine.ext import db
# from google.appengine.ext.db import djangoforms
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import taskqueue
from google.appengine.api import users
import settings
import util
#from django.forms import ModelForm
from collections import OrderedDict
from django import forms
# import google.appengine.ext.django as django
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
WEBCOMPONENTS = 1
MISC = 2
SECURITY = 3
MULTIMEDIA = 4
DOM = 5
FILE = 6
OFFLINE = 7
DEVICE = 8
COMMUNICATION = 9
JAVASCRIPT = 10
NETWORKING = 11
INPUT = 12
PERFORMANCE = 13
GRAPHICS = 14
CSS = 15
HOUDINI = 16
SERVICEWORKER = 17
WEBRTC = 18
LAYERED = 19
FEATURE_CATEGORIES = {
CSS: 'CSS',
WEBCOMPONENTS: 'Web Components',
MISC: 'Misc',
SECURITY: 'Security',
MULTIMEDIA: 'Multimedia',
DOM: 'DOM',
FILE: 'File APIs',
OFFLINE: 'Offline / Storage',
DEVICE: 'Device',
COMMUNICATION: 'Realtime / Communication',
JAVASCRIPT: 'JavaScript',
NETWORKING: 'Network / Connectivity',
INPUT: 'User input',
PERFORMANCE: 'Performance',
GRAPHICS: 'Graphics',
HOUDINI: 'Houdini',
SERVICEWORKER: 'Service Worker',
WEBRTC: 'Web RTC',
LAYERED: 'Layered APIs',
}
# Intent stages and mapping from stage to stage name.
INTENT_NONE = 0
INTENT_IMPLEMENT = 1
INTENT_EXPERIMENT = 2
INTENT_EXTEND_TRIAL = 3
INTENT_IMPLEMENT_SHIP = 4
INTENT_SHIP = 5
INTENT_REMOVE = 6
INTENT_STAGES = {
INTENT_NONE: 'None',
INTENT_IMPLEMENT: 'Prototype',
INTENT_EXPERIMENT: 'Experiment',
INTENT_EXTEND_TRIAL: 'Extend Origin Trial',
INTENT_IMPLEMENT_SHIP: 'Implement and Ship',
INTENT_SHIP: 'Ship',
INTENT_REMOVE: 'Remove',
}
NO_ACTIVE_DEV = 1
PROPOSED = 2
IN_DEVELOPMENT = 3
BEHIND_A_FLAG = 4
ENABLED_BY_DEFAULT = 5
DEPRECATED = 6
REMOVED = 7
ORIGIN_TRIAL = 8
INTERVENTION = 9
NO_LONGER_PURSUING = 1000 # insure bottom of list
# Ordered dictionary, make sure the order of this dictionary matches that of
# the sorted list above!
IMPLEMENTATION_STATUS = OrderedDict()
IMPLEMENTATION_STATUS[NO_ACTIVE_DEV] = 'No active development'
IMPLEMENTATION_STATUS[PROPOSED] = 'Proposed'
IMPLEMENTATION_STATUS[IN_DEVELOPMENT] = 'In development'
IMPLEMENTATION_STATUS[BEHIND_A_FLAG] = 'Behind a flag'
IMPLEMENTATION_STATUS[ENABLED_BY_DEFAULT] = 'Enabled by default'
IMPLEMENTATION_STATUS[DEPRECATED] = 'Deprecated'
IMPLEMENTATION_STATUS[REMOVED] = 'Removed'
IMPLEMENTATION_STATUS[ORIGIN_TRIAL] = 'Origin trial'
IMPLEMENTATION_STATUS[INTERVENTION] = 'Browser Intervention'
IMPLEMENTATION_STATUS[NO_LONGER_PURSUING] = 'No longer pursuing'
MAJOR_NEW_API = 1
MAJOR_MINOR_NEW_API = 2
SUBSTANTIVE_CHANGES = 3
MINOR_EXISTING_CHANGES = 4
EXTREMELY_SMALL_CHANGE = 5
FOOTPRINT_CHOICES = {
MAJOR_NEW_API: ('A major new independent API (e.g. adding a large # '
'independent concepts with many methods/properties/objects)'),
MAJOR_MINOR_NEW_API: ('Major changes to an existing API OR a minor new '
'independent API (e.g. adding a large # of new '
'methods/properties or introducing new concepts to '
'augment an existing API)'),
SUBSTANTIVE_CHANGES: ('Substantive changes to an existing API (e.g. small '
'number of new methods/properties)'),
MINOR_EXISTING_CHANGES: (
'Minor changes to an existing API (e.g. adding a new keyword/allowed '
'argument to a property/method)'),
EXTREMELY_SMALL_CHANGE: ('Extremely small tweaks to an existing API (e.g. '
'how existing keywords/arguments are interpreted)'),
}
MAINSTREAM_NEWS = 1
WARRANTS_ARTICLE = 2
IN_LARGER_ARTICLE = 3
SMALL_NUM_DEVS = 4
SUPER_SMALL = 5
VISIBILITY_CHOICES = {
MAINSTREAM_NEWS: 'Likely in mainstream tech news',
WARRANTS_ARTICLE: 'Will this feature generate articles on sites like developers.google.com/web/',
IN_LARGER_ARTICLE: 'Covered as part of a larger article but not on its own',
SMALL_NUM_DEVS: 'Only a very small number of web developers will care',
SUPER_SMALL: "So small it doesn't need to be covered in this dashboard",
}
SHIPPED = 1
IN_DEV = 2
PUBLIC_SUPPORT = 3
MIXED_SIGNALS = 4
NO_PUBLIC_SIGNALS = 5
PUBLIC_SKEPTICISM = 6
OPPOSED = 7
VENDOR_VIEWS = {
SHIPPED: 'Shipped',
IN_DEV: 'In development',
PUBLIC_SUPPORT: 'Public support',
MIXED_SIGNALS: 'Mixed public signals',
NO_PUBLIC_SIGNALS: 'No public signals',
PUBLIC_SKEPTICISM: 'Public skepticism',
OPPOSED: 'Opposed',
}
DEFACTO_STD = 1
ESTABLISHED_STD = 2
WORKING_DRAFT = 3
EDITORS_DRAFT = 4
PUBLIC_DISCUSSION = 5
NO_STD_OR_DISCUSSION = 6
STANDARDIZATION = {
DEFACTO_STD: 'De-facto standard',
ESTABLISHED_STD: 'Established standard',
WORKING_DRAFT: 'Working draft or equivalent',
EDITORS_DRAFT: "Editor's draft",
PUBLIC_DISCUSSION: 'Public discussion',
NO_STD_OR_DISCUSSION: 'No public standards discussion',
}
DEV_STRONG_POSITIVE = 1
DEV_POSITIVE = 2
DEV_MIXED_SIGNALS = 3
DEV_NO_SIGNALS = 4
DEV_NEGATIVE = 5
DEV_STRONG_NEGATIVE = 6
WEB_DEV_VIEWS = {
DEV_STRONG_POSITIVE: 'Strongly positive',
DEV_POSITIVE: 'Positive',
DEV_MIXED_SIGNALS: 'Mixed signals',
DEV_NO_SIGNALS: 'No signals',
DEV_NEGATIVE: 'Negative',
DEV_STRONG_NEGATIVE: 'Strongly negative',
}
def del_none(d):
"""
Delete dict keys with None values, and empty lists, recursively.
"""
for key, value in d.items():
if value is None or (isinstance(value, list) and len(value) == 0):
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
def list_to_chunks(l, n):
"""Yield successive n-sized chunk lists from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
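# For example (illustrative): list(list_to_chunks(range(5), 2)) returns
# [[0, 1], [2, 3], [4]].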
class DictModel(db.Model):
# def to_dict(self):
# return dict([(p, unicode(getattr(self, p))) for p in self.properties()])
def format_for_template(self):
d = self.to_dict()
d['id'] = self.key().id()
return d
def to_dict(self):
output = {}
for key, prop in self.properties().iteritems():
value = getattr(self, key)
if value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif isinstance(value, datetime.date):
# Convert date/datetime to ms-since-epoch ("new Date()").
#ms = time.mktime(value.utctimetuple())
#ms += getattr(value, 'microseconds', 0) / 1000
#output[key] = int(ms)
output[key] = unicode(value)
elif isinstance(value, db.GeoPt):
output[key] = {'lat': value.lat, 'lon': value.lon}
elif isinstance(value, db.Model):
        output[key] = value.to_dict()
elif isinstance(value, users.User):
output[key] = value.email()
else:
raise ValueError('cannot encode ' + repr(prop))
return output
class BlinkComponent(DictModel):
DEFAULT_COMPONENT = 'Blink'
COMPONENTS_URL = 'https://blinkcomponents-b48b5.firebaseapp.com'
COMPONENTS_ENDPOINT = '%s/blinkcomponents' % COMPONENTS_URL
WF_CONTENT_ENDPOINT = '%s/wfcomponents' % COMPONENTS_URL
name = db.StringProperty(required=True, default=DEFAULT_COMPONENT)
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
@property
def subscribers(self):
return FeatureOwner.all().filter('blink_components = ', self.key()).order('name').fetch(None)
@property
def owners(self):
return FeatureOwner.all().filter('primary_blink_components = ', self.key()).order('name').fetch(None)
@classmethod
def fetch_all_components(self, update_cache=False):
"""Returns the list of blink components from live endpoint if unavailable in the cache."""
key = '%s|blinkcomponents' % (settings.MEMCACHE_KEY_PREFIX)
components = memcache.get(key)
if components is None or update_cache:
components = []
result = urlfetch.fetch(self.COMPONENTS_ENDPOINT, deadline=60)
if result.status_code == 200:
components = sorted(json.loads(result.content))
memcache.set(key, components)
else:
logging.error('Fetching blink components returned: %s' % result.status_code)
return components
@classmethod
def fetch_wf_content_for_components(self, update_cache=False):
"""Returns the /web content that use each blink component."""
key = '%s|wfcomponents' % (settings.MEMCACHE_KEY_PREFIX)
components = memcache.get(key)
if components is None or update_cache:
components = {}
result = urlfetch.fetch(self.WF_CONTENT_ENDPOINT, deadline=60)
if result.status_code == 200:
components = json.loads(result.content)
memcache.set(key, components)
else:
logging.error('Fetching /web blink components content returned: %s' % result.status_code)
return components
@classmethod
def update_db(self):
"""Updates the db with new Blink components from the json endpoint"""
self.fetch_wf_content_for_components(update_cache=True) # store /web content in memcache
new_components = self.fetch_all_components(update_cache=True)
existing_comps = self.all().fetch(None)
for name in new_components:
if not len([x.name for x in existing_comps if x.name == name]):
logging.info('Adding new BlinkComponent: ' + name)
c = BlinkComponent(name=name)
c.put()
@classmethod
def get_by_name(self, component_name):
"""Fetch blink component with given name."""
q = self.all()
q.filter('name =', component_name)
component = q.fetch(1)
if not component:
logging.error('%s is an unknown BlinkComponent.' % (component_name))
return None
return component[0]
# UMA metrics.
class StableInstance(DictModel):
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
property_name = db.StringProperty(required=True)
bucket_id = db.IntegerProperty(required=True)
date = db.DateProperty(verbose_name='When the data was fetched',
required=True)
#hits = db.IntegerProperty(required=True)
#total_pages = db.IntegerProperty()
day_percentage = db.FloatProperty()
rolling_percentage = db.FloatProperty()
class AnimatedProperty(StableInstance):
pass
class FeatureObserver(StableInstance):
pass
# Feature dashboard.
class Feature(DictModel):
"""Container for a feature."""
DEFAULT_MEMCACHE_KEY = '%s|features' % (settings.MEMCACHE_KEY_PREFIX)
MAX_CHUNK_SIZE = 500 # max num features to save for each memcache chunk.
@classmethod
def get_feature_chunk_memcache_keys(self, key_prefix):
num_features = len(Feature.all().fetch(limit=None, keys_only=True))
l = list_to_chunks(range(0, num_features), self.MAX_CHUNK_SIZE)
return ['%s|chunk%s' % (key_prefix, i) for i,val in enumerate(l)]
@classmethod
def set_feature_chunk_memcache_keys(self, key_prefix, feature_list):
chunks = list_to_chunks(feature_list, self.MAX_CHUNK_SIZE)
vals = []
for i, chunk in enumerate(chunks):
vals.append(('%s|chunk%s' % (key_prefix, i), chunk))
# d = OrderedDict(sorted(dict(vals).items(), key=lambda t: t[0]))
d = dict(vals)
return d
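  # Illustrative example of the mapping produced above (the prefix is an
  # assumption): with 1,200 features and MAX_CHUNK_SIZE = 500 the result is
  #   {'<prefix>|chunk0': feature_list[0:500],
  #    '<prefix>|chunk1': feature_list[500:1000],
  #    '<prefix>|chunk2': feature_list[1000:1200]}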
@classmethod
def _first_of_milestone(self, feature_list, milestone, start=0):
for i in xrange(start, len(feature_list)):
f = feature_list[i]
if (str(f['shipped_milestone']) == str(milestone) or
f['impl_status_chrome'] == str(milestone)):
return i
elif (f['shipped_milestone'] == None and
str(f['shipped_android_milestone']) == str(milestone)):
return i
return -1
@classmethod
def _first_of_milestone_v2(self, feature_list, milestone, start=0):
for i in xrange(start, len(feature_list)):
f = feature_list[i]
desktop_milestone = f['browsers']['chrome'].get('desktop', None)
android_milestone = f['browsers']['chrome'].get('android', None)
status = f['browsers']['chrome']['status'].get('text', None)
if (str(desktop_milestone) == str(milestone) or status == str(milestone)):
return i
elif (desktop_milestone == None and str(android_milestone) == str(milestone)):
return i
return -1
@classmethod
def _annotate_first_of_milestones(self, feature_list, version=None):
try:
omaha_data = util.get_omaha_data()
win_versions = omaha_data[0]['versions']
# Find the latest canary major version from the list of windows versions.
canary_versions = [x for x in win_versions if x.get('channel') and x.get('channel').startswith('canary')]
LATEST_VERSION = int(canary_versions[0].get('version').split('.')[0])
milestones = range(1, LATEST_VERSION + 1)
milestones.reverse()
versions = [
IMPLEMENTATION_STATUS[NO_ACTIVE_DEV],
IMPLEMENTATION_STATUS[PROPOSED],
IMPLEMENTATION_STATUS[IN_DEVELOPMENT],
IMPLEMENTATION_STATUS[DEPRECATED],
]
versions.extend(milestones)
versions.append(IMPLEMENTATION_STATUS[NO_LONGER_PURSUING])
first_of_milestone_func = Feature._first_of_milestone
if version == 2:
first_of_milestone_func = Feature._first_of_milestone_v2
last_good_idx = 0
for i, ver in enumerate(versions):
idx = first_of_milestone_func(feature_list, ver, start=last_good_idx)
if idx != -1:
feature_list[idx]['first_of_milestone'] = True
last_good_idx = idx
except Exception as e:
logging.error(e)
def format_for_template(self, version=None):
d = self.to_dict()
if version == 2:
if self.is_saved():
d['id'] = self.key().id()
else:
d['id'] = None
d['category'] = FEATURE_CATEGORIES[self.category]
if self.intent_stage is not None:
d['intent_stage'] = INTENT_STAGES[self.intent_stage]
d['created'] = {
'by': d.pop('created_by', None),
'when': d.pop('created', None),
}
d['updated'] = {
'by': d.pop('updated_by', None),
'when': d.pop('updated', None),
}
d['standards'] = {
'spec': d.pop('spec_link', None),
'status': {
'text': STANDARDIZATION[self.standardization],
'val': d.pop('standardization', None),
},
'visibility': {
'text': VISIBILITY_CHOICES[self.visibility],
'val': d.pop('visibility', None),
},
'footprint': {
'val': d.pop('footprint', None),
#'text': FOOTPRINT_CHOICES[self.footprint]
}
}
d['resources'] = {
'samples': d.pop('sample_links', []),
'docs': d.pop('doc_links', []),
}
d['tags'] = d.pop('search_tags', [])
d['browsers'] = {
'chrome': {
'bug': d.pop('bug_url', None),
'blink_components': d.pop('blink_components', []),
'owners': d.pop('owner', []),
'origintrial': self.impl_status_chrome == ORIGIN_TRIAL,
'intervention': self.impl_status_chrome == INTERVENTION,
'prefixed': d.pop('prefixed', False),
'flag': self.impl_status_chrome == BEHIND_A_FLAG,
'status': {
'text': IMPLEMENTATION_STATUS[self.impl_status_chrome],
'val': d.pop('impl_status_chrome', None)
},
'desktop': d.pop('shipped_milestone', None),
'android': d.pop('shipped_android_milestone', None),
'webview': d.pop('shipped_webview_milestone', None),
'ios': d.pop('shipped_ios_milestone', None),
},
'ff': {
'view': {
'text': VENDOR_VIEWS[self.ff_views],
'val': d.pop('ff_views', None),
'url': d.pop('ff_views_link', None),
'notes': d.pop('ff_views_notes', None),
}
},
'edge': {
'view': {
'text': VENDOR_VIEWS[self.ie_views],
'val': d.pop('ie_views', None),
'url': d.pop('ie_views_link', None),
'notes': d.pop('ie_views_notes', None),
}
},
'safari': {
'view': {
'text': VENDOR_VIEWS[self.safari_views],
'val': d.pop('safari_views', None),
'url': d.pop('safari_views_link', None),
'notes': d.pop('safari_views_notes', None),
}
},
'webdev': {
'view': {
'text': WEB_DEV_VIEWS[self.web_dev_views],
'val': d.pop('web_dev_views', None),
'url': d.pop('web_dev_views_link', None),
'notes': d.pop('web_dev_views_notes', None),
}
}
}
if self.shipped_milestone:
d['browsers']['chrome']['status']['milestone_str'] = self.shipped_milestone
elif self.shipped_milestone is None and self.shipped_android_milestone:
d['browsers']['chrome']['status']['milestone_str'] = self.shipped_android_milestone
else:
d['browsers']['chrome']['status']['milestone_str'] = d['browsers']['chrome']['status']['text']
del d['created']
del_none(d) # Further prune response by removing null/[] values.
else:
if self.is_saved():
d['id'] = self.key().id()
else:
d['id'] = None
d['category'] = FEATURE_CATEGORIES[self.category]
if self.intent_stage is not None:
d['intent_stage'] = INTENT_STAGES[self.intent_stage]
d['visibility'] = VISIBILITY_CHOICES[self.visibility]
d['impl_status_chrome'] = IMPLEMENTATION_STATUS[self.impl_status_chrome]
d['meta'] = {
'origintrial': self.impl_status_chrome == ORIGIN_TRIAL,
'intervention': self.impl_status_chrome == INTERVENTION,
'needsflag': self.impl_status_chrome == BEHIND_A_FLAG,
}
if self.shipped_milestone:
d['meta']['milestone_str'] = self.shipped_milestone
elif self.shipped_milestone is None and self.shipped_android_milestone:
d['meta']['milestone_str'] = self.shipped_android_milestone
else:
d['meta']['milestone_str'] = d['impl_status_chrome']
d['ff_views'] = {'value': self.ff_views,
'text': VENDOR_VIEWS[self.ff_views]}
d['ie_views'] = {'value': self.ie_views,
'text': VENDOR_VIEWS[self.ie_views]}
d['safari_views'] = {'value': self.safari_views,
'text': VENDOR_VIEWS[self.safari_views]}
d['standardization'] = {'value': self.standardization,
'text': STANDARDIZATION[self.standardization]}
d['web_dev_views'] = {'value': self.web_dev_views,
'text': WEB_DEV_VIEWS[self.web_dev_views]}
return d
def format_for_edit(self):
d = self.to_dict()
#d['id'] = self.key().id
d['owner'] = ', '.join(self.owner)
d['explainer_links'] = '\r\n'.join(self.explainer_links)
d['doc_links'] = '\r\n'.join(self.doc_links)
d['sample_links'] = '\r\n'.join(self.sample_links)
d['search_tags'] = ', '.join(self.search_tags)
d['blink_components'] = self.blink_components[0] #TODO: support more than one component.
return d
@classmethod
def get_all(self, limit=None, order='-updated', filterby=None,
update_cache=False):
KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, order, limit)
# TODO(ericbidelman): Support more than one filter.
if filterby is not None:
s = ('%s%s' % (filterby[0], filterby[1])).replace(' ', '')
KEY += '|%s' % s
feature_list = memcache.get(KEY)
if feature_list is None or update_cache:
query = Feature.all().order(order) #.order('name')
# TODO(ericbidelman): Support more than one filter.
if filterby:
query.filter(filterby[0], filterby[1])
features = query.fetch(limit)
feature_list = [f.format_for_template() for f in features]
memcache.set(KEY, feature_list)
return feature_list
@classmethod
def get_all_with_statuses(self, statuses, update_cache=False):
if not statuses:
return []
KEY = '%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, sorted(statuses))
feature_list = memcache.get(KEY)
if feature_list is None or update_cache:
# There's no way to do an OR in a single datastore query, and there's a
# very good chance that the self.get_all() results will already be in
      # memcache, so use a list comprehension to grab the features we
      # want from the list of everything.
feature_list = [feature for feature in self.get_all(update_cache=update_cache)
if feature['impl_status_chrome'] in statuses]
memcache.set(KEY, feature_list)
return feature_list
@classmethod
def get_feature(self, feature_id, update_cache=False):
KEY = '%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, feature_id)
feature = memcache.get(KEY)
if feature is None or update_cache:
unformatted_feature = Feature.get_by_id(feature_id)
if unformatted_feature:
feature = unformatted_feature.format_for_template()
feature['updated_display'] = unformatted_feature.updated.strftime("%Y-%m-%d")
feature['new_crbug_url'] = unformatted_feature.new_crbug_url()
memcache.set(KEY, feature)
return feature
@classmethod
def get_chronological(self, limit=None, update_cache=False, version=None):
KEY = '%s|%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY,
'cronorder', limit, version)
keys = Feature.get_feature_chunk_memcache_keys(KEY)
feature_list = memcache.get_multi(keys)
# If we didn't get the expected number of chunks back (or a cache update
# was requested), do a db query.
if len(feature_list.keys()) != len(keys) or update_cache:
# Features with no active, in dev, proposed features.
q = Feature.all()
q.order('impl_status_chrome')
q.order('name')
q.filter('impl_status_chrome <=', IN_DEVELOPMENT)
pre_release = q.fetch(None)
# Shipping features. Exclude features that do not have a desktop
# shipping milestone.
q = Feature.all()
q.order('-shipped_milestone')
q.order('name')
q.filter('shipped_milestone !=', None)
shipping_features = q.fetch(None)
# Features with an android shipping milestone but no desktop milestone.
q = Feature.all()
q.order('-shipped_android_milestone')
q.order('name')
q.filter('shipped_milestone =', None)
android_only_shipping_features = q.fetch(None)
# No longer pursuing features.
q = Feature.all()
q.order('impl_status_chrome')
q.order('name')
q.filter('impl_status_chrome =', NO_LONGER_PURSUING)
no_longer_pursuing_features = q.fetch(None)
shipping_features.extend(android_only_shipping_features)
shipping_features = [f for f in shipping_features if (IN_DEVELOPMENT < f.impl_status_chrome < NO_LONGER_PURSUING)]
def getSortingMilestone(feature):
feature._sort_by_milestone = (feature.shipped_milestone or
feature.shipped_android_milestone)
return feature
# Sort the feature list on either Android shipping milestone or desktop
# shipping milestone, depending on which is specified. If a desktop
      # milestone is defined, that will take precedence.
shipping_features = map(getSortingMilestone, shipping_features)
# First sort by name, then sort by feature milestone (latest first).
shipping_features.sort(key=lambda f: f.name, reverse=False)
shipping_features.sort(key=lambda f: f._sort_by_milestone, reverse=True)
      # Construct the proper ordering.
pre_release.extend(shipping_features)
pre_release.extend(no_longer_pursuing_features)
feature_list = [f.format_for_template(version) for f in pre_release]
self._annotate_first_of_milestones(feature_list, version=version)
# Memcache doesn't support saving values > 1MB. Break up features list into
# chunks so we don't hit the limit.
memcache.set_multi(Feature.set_feature_chunk_memcache_keys(KEY, feature_list))
else:
temp_feature_list = []
# Reconstruct feature list by ordering chunks.
for key in sorted(feature_list.keys()):
temp_feature_list.extend(feature_list[key])
feature_list = temp_feature_list
return feature_list
@classmethod
def get_shipping_samples(self, limit=None, update_cache=False):
KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, 'samples', limit)
feature_list = memcache.get(KEY)
if feature_list is None or update_cache:
# Get all shipping features. Ordered by shipping milestone (latest first).
q = Feature.all()
q.filter('impl_status_chrome IN', [ENABLED_BY_DEFAULT, ORIGIN_TRIAL, INTERVENTION])
q.order('-impl_status_chrome')
q.order('-shipped_milestone')
q.order('name')
features = q.fetch(None)
# Get non-shipping features (sans removed or deprecated ones) and
# append to bottom of list.
q = Feature.all()
q.filter('impl_status_chrome <', ENABLED_BY_DEFAULT)
q.order('-impl_status_chrome')
q.order('-shipped_milestone')
q.order('name')
others = q.fetch(None)
features.extend(others)
# Filter out features without sample links.
feature_list = [f.format_for_template() for f in features
if len(f.sample_links)]
memcache.set(KEY, feature_list)
return feature_list
def crbug_number(self):
if not self.bug_url:
return
m = re.search(r'[\/|?id=]([0-9]+)$', self.bug_url)
if m:
return m.group(1)
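  # For example (illustrative), both of these bug URL styles yield '123456'
  # from the regex above:
  #   https://bugs.chromium.org/p/chromium/issues/detail?id=123456
  #   https://crbug.com/123456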
def new_crbug_url(self):
url = 'https://bugs.chromium.org/p/chromium/issues/entry'
    params = ['components=' + (self.blink_components[0] or BlinkComponent.DEFAULT_COMPONENT)]
crbug_number = self.crbug_number()
if crbug_number and self.impl_status_chrome in (
NO_ACTIVE_DEV,
PROPOSED,
IN_DEVELOPMENT,
BEHIND_A_FLAG,
ORIGIN_TRIAL,
INTERVENTION):
params.append('blocking=' + crbug_number)
if self.owner:
params.append('cc=' + ','.join(self.owner))
return url + '?' + '&'.join(params)
def __init__(self, *args, **kwargs):
super(Feature, self).__init__(*args, **kwargs)
# Stash existing values when entity is created so we can diff property
# values later in put() to know what's changed. https://stackoverflow.com/a/41344898
for prop_name, prop in self.properties().iteritems():
old_val = getattr(self, prop_name, None)
setattr(self, '_old_' + prop_name, old_val)
def __notify_feature_subscribers_of_changes(self, is_update):
"""Async notifies subscribers of new features and property changes to features by
posting to a task queue."""
# Diff values to see what properties have changed.
changed_props = []
for prop_name, prop in self.properties().iteritems():
new_val = getattr(self, prop_name, None)
old_val = getattr(self, '_old_' + prop_name, None)
if new_val != old_val:
changed_props.append({
'prop_name': prop_name, 'old_val': old_val, 'new_val': new_val})
payload = json.dumps({
'changes': changed_props,
'is_update': is_update,
'feature': self.format_for_template(version=2)
})
# Create task to email subscribers.
    queue = taskqueue.Queue()  # name='emailer'
task = taskqueue.Task(method="POST", url='/tasks/email-subscribers',
target='notifier', payload=payload)
queue.add(task)
# Create task to send push notifications
queue = taskqueue.Queue()
task = taskqueue.Task(method="POST", url='/tasks/send_notifications',
target='notifier', payload=payload)
queue.add(task)
def put(self, **kwargs):
is_update = self.is_saved()
key = super(Feature, self).put(**kwargs)
self.__notify_feature_subscribers_of_changes(is_update)
return key
# Metadata.
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
updated_by = db.UserProperty(auto_current_user=True)
created_by = db.UserProperty(auto_current_user_add=True)
intent_template_use_count = db.IntegerProperty(default = 0)
# General info.
category = db.IntegerProperty(required=True)
name = db.StringProperty(required=True)
intent_stage = db.IntegerProperty(default=0)
summary = db.StringProperty(required=True, multiline=True)
intent_to_implement_url = db.LinkProperty()
origin_trial_feedback_url = db.LinkProperty()
# A list of intent threads in the format "date|subject|url"
intent_threads = db.StringListProperty()
motivation = db.StringProperty(multiline=True)
# Chromium details.
bug_url = db.LinkProperty()
blink_components = db.StringListProperty(required=True, default=[BlinkComponent.DEFAULT_COMPONENT])
impl_status_chrome = db.IntegerProperty(required=True)
shipped_milestone = db.IntegerProperty()
shipped_android_milestone = db.IntegerProperty()
shipped_ios_milestone = db.IntegerProperty()
shipped_webview_milestone = db.IntegerProperty()
owner = db.ListProperty(db.Email)
footprint = db.IntegerProperty()
interop_compat_risks = db.StringProperty(multiline=True)
ergonomics_risks = db.StringProperty(multiline=True)
activation_risks = db.StringProperty(multiline=True)
security_risks = db.StringProperty(multiline=True)
debuggability = db.StringProperty(multiline=True)
all_platforms = db.BooleanProperty()
all_platforms_descr = db.StringProperty(multiline=True)
wpt = db.BooleanProperty()
wpt_descr = db.StringProperty(multiline=True)
visibility = db.IntegerProperty(required=True)
#webbiness = db.IntegerProperty() # TODO: figure out what this is
# Standards details.
standardization = db.IntegerProperty(required=True)
spec_link = db.LinkProperty()
tag_review = db.StringProperty(multiline=True)
prefixed = db.BooleanProperty()
explainer_links = db.StringListProperty()
ff_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
ie_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
safari_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
web_dev_views = db.IntegerProperty(required=True)
ff_views_link = db.LinkProperty()
ie_views_link = db.LinkProperty()
safari_views_link = db.LinkProperty()
web_dev_views_link = db.LinkProperty()
ff_views_notes = db.StringProperty(multiline=True)
ie_views_notes = db.StringProperty(multiline=True)
safari_views_notes = db.StringProperty(multiline=True)
web_dev_views_notes = db.StringProperty(multiline=True)
doc_links = db.StringListProperty()
sample_links = db.StringListProperty()
#tests = db.StringProperty()
search_tags = db.StringListProperty()
comments = db.StringProperty(multiline=True)
experiment_goals = db.StringProperty(multiline=True)
experiment_timeline = db.StringProperty(multiline=True)
experiment_risks = db.StringProperty(multiline=True)
experiment_extension_reason = db.StringProperty(multiline=True)
ongoing_constraints = db.StringProperty(multiline=True)
class PlaceholderCharField(forms.CharField):
def __init__(self, *args, **kwargs):
#super(forms.CharField, self).__init__(*args, **kwargs)
attrs = {}
if kwargs.get('placeholder'):
attrs['placeholder'] = kwargs.get('placeholder')
del kwargs['placeholder']
label = kwargs.get('label') or ''
if label:
del kwargs['label']
self.max_length = kwargs.get('max_length') or None
super(forms.CharField, self).__init__(label=label,
widget=forms.TextInput(attrs=attrs), *args, **kwargs)
# class PlaceholderForm(forms.Form):
# def __init__(self, *args, **kwargs):
# super(PlaceholderForm, self).__init__(*args, **kwargs)
# for field_name in self.fields:
# field = self.fields.get(field_name)
# if field:
# if type(field.widget) in (forms.TextInput, forms.DateInput):
# field.widget = forms.TextInput(attrs={'placeholder': field.label})
class FeatureForm(forms.Form):
SHIPPED_HELP_TXT = ('First milestone to ship with this '
'status. Applies to: Enabled by default, Behind a flag, '
'Origin trial, Browser Intervention, and Deprecated. If '
'the flag is \'test\' rather than \'experimental\' set '
'status to In development.')
# Note that the "required" argument in the following field definitions only
# mean so much in practice. There's various code in js/admin/feature_form.js,
# including intentStageChanged(), that adjusts what fields are required (as
# well as visible at all). IOW, when making changes to what form fields are or
# are not required, look both in the definitions here as well as in
# js/admin/feature_form.js and make sure the code works as intended.
#name = PlaceholderCharField(required=True, placeholder='Feature name')
name = forms.CharField(required=True, label='Feature',
help_text='Capitalize only the first letter and the beginnings of proper nouns.')
summary = forms.CharField(label='', required=True,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 500}),
help_text='Provide a one sentence description followed by one or two lines explaining how this feature helps web developers.')
category = forms.ChoiceField(required=True, help_text='Select the most specific category. If unsure, leave as "%s".' % FEATURE_CATEGORIES[MISC],
initial=MISC,
choices=sorted(FEATURE_CATEGORIES.items(), key=lambda x: x[1]))
intent_stage = forms.ChoiceField(required=True, label='Intent stage', help_text='Select the appropriate intent stage.',
initial=INTENT_IMPLEMENT,
choices=INTENT_STAGES.items())
current_user_email = users.get_current_user().email() if users.get_current_user() else None
owner = forms.CharField(initial=current_user_email, required=True, label='Contact emails',
help_text='Comma separated list of full email addresses. Prefer @chromium.org.')
summary = forms.CharField(label='Feature summary', required=True, max_length=500,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 500}),
help_text='Summarize the feature using complete sentences as you would to an external developer using the feature.')
motivation = forms.CharField(label='Motivation', required=True,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Explain why the web needs this change. It may be useful to describe what web developers are forced to do without it. When possible, include links to back up your claims in the explainer.')
explainer_links = forms.CharField(label='Explainer link(s)', required=False,
widget=forms.Textarea(attrs={'rows': 4, 'cols': 50, 'maxlength': 500}),
help_text='Link to explainer(s) (one URL per line). You should have at least an explainer in hand and have shared it on a public forum before sending an Intent to Prototype in order to enable discussion with other browser vendors, standards bodies, or other interested parties.')
intent_to_implement_url = forms.URLField(required=False, label='Intent to Prototype link',
help_text='Link to the "Intent to Prototype" discussion thread.')
origin_trial_feedback_url = forms.URLField(required=False, label='Origin Trial feedback summary',
help_text='If your feature was available as an Origin Trial, link to a summary of usage and developer feedback. If not, leave this empty.')
doc_links = forms.CharField(label='Doc link(s)', required=False,
widget=forms.Textarea(attrs={'rows': 4, 'cols': 50, 'maxlength': 500}),
help_text='Links to design doc(s) (one URL per line), if and when available. [This is not required to send out an Intent to Prototype. Please update the intent thread with the design doc when ready]. An explainer and/or design doc is sufficient to start this process. [Note: Please include links and data, where possible, to support any claims.]')
standardization = forms.ChoiceField(
label='Standardization', choices=STANDARDIZATION.items(),
initial=EDITORS_DRAFT,
help_text=("The standardization status of the API. In bodies that don't "
"use this nomenclature, use the closest equivalent."))
spec_link = forms.URLField(required=False, label='Spec link',
help_text="Link to spec, if and when available. Please update the chromestatus.com entry and the intent thread(s) with the spec link when available.")
tag_review = forms.CharField(label='TAG Review', required=True,
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 1480}),
help_text='Link(s) to TAG review(s), or explanation why this is not needed.')
interop_compat_risks = forms.CharField(label='Interoperability and Compatibility Risks', required=True,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Describe the degree of <a target="_blank" href="https://sites.google.com/a/chromium.org/dev/blink?pli=1#TOC-Policy-for-shipping-and-removing-web-platform-API-features">interoperability risk</a>. For a new feature, the main risk is that it fails to become an interoperable part of the web platform if other browsers do not implement it. For a removal, please review our <a target="_blank" href="https://docs.google.com/document/d/1RC-pBBvsazYfCNNUSkPqAVpSpNJ96U8trhNkfV0v9fk/edit">principles of web compatibility</a>.')
safari_views = forms.ChoiceField(label='Safari views',
choices=VENDOR_VIEWS.items(),
initial=NO_PUBLIC_SIGNALS)
safari_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
safari_views_notes = forms.CharField(required=False, label='',
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}))
ff_views = forms.ChoiceField(label='Firefox views',
choices=VENDOR_VIEWS.items(),
initial=NO_PUBLIC_SIGNALS)
ff_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
ff_views_notes = forms.CharField(required=False, label='',
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}))
ie_views = forms.ChoiceField(label='Edge',
choices=VENDOR_VIEWS.items(),
initial=NO_PUBLIC_SIGNALS)
ie_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
ie_views_notes = forms.CharField(required=False, label='',
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}))
web_dev_views = forms.ChoiceField(
label='Web / Framework developer views',
choices=WEB_DEV_VIEWS.items(),
initial=DEV_NO_SIGNALS,
help_text='If unsure, default to "No signals".')
web_dev_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
web_dev_views_notes = forms.CharField(required=False, label='',
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'placeholder': 'Notes', 'maxlength': 1480}),
help_text='Reference known representative examples of opinion, both positive and negative.')
ergonomics_risks = forms.CharField(label='Ergonomics Risks', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Are there any other platform APIs this feature will frequently be used in tandem with? Could the default usage of this API make it hard for Chrome to maintain good performance (i.e. synchronous return, must run on a certain thread, guaranteed return timing)?')
activation_risks = forms.CharField(label='Activation Risks', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Will it be challenging for developers to take advantage of this feature immediately, as-is? Would this feature benefit from having polyfills, significant documentation and outreach, and/or libraries built on top of it to make it easier to use?')
security_risks = forms.CharField(label='Security Risks', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='List any security considerations that were taken into account when designing this feature.')
experiment_goals = forms.CharField(label='Experiment Goals', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Which pieces of the API surface are you looking to gain insight on? What metrics/measurement/feedback will you be using to validate designs? Double check that your experiment makes sense given that a large developer (e.g. a Google product or Facebook) likely can\'t use it in production due to the limits enforced by origin trials.\n\nIf Intent to Extend Origin Trial, highlight new/different areas for experimentation. Should not be an exact copy of goals from the first Intent to Experiment.')
experiment_timeline = forms.CharField(label='Experiment Timeline', required=False,
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 1480}),
help_text='When does the experiment start and expire?')
experiment_risks = forms.CharField(label='Experiment Risks', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='When this experiment comes to an end are there any risks to the sites that were using it, for example losing access to important storage due to an experimental storage API?')
experiment_extension_reason = forms.CharField(label='Experiment Extension Reason', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='If this is a repeat experiment, link to the previous Intent to Experiment thread and explain why you want to extend this experiment.')
ongoing_constraints = forms.CharField(label='Ongoing Constraints', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Do you anticipate adding any ongoing technical constraints to the codebase while implementing this feature? We prefer to avoid features which require or assume a specific architecture. For most features, the answer here is "None."')
debuggability = forms.CharField(label='Debuggability', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Description of the desired DevTools debugging support for your feature. Consider emailing the <a href="https://groups.google.com/forum/?fromgroups#!forum/google-chrome-developer-tools">google-chrome-developer-tools</a> list for additional help. For new language features in V8 specifically, refer to the debugger support checklist. If your feature doesn\'t require changes to DevTools in order to provide a good debugging experience, feel free to leave this section empty.')
all_platforms = forms.BooleanField(required=False, initial=False, label='Supported on all platforms?',
help_text='Will this feature be supported on all six Blink platforms (Windows, Mac, Linux, Chrome OS, Android, and Android WebView)?')
all_platforms_descr = forms.CharField(label='Platform Support Explanation', required=False,
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 2000}),
help_text='Explanation for why this feature is, or is not, supported on all platforms.')
wpt = forms.BooleanField(required=False, initial=False, label='Web Platform Tests', help_text='Is this feature fully tested in Web Platform Tests?')
wpt_descr = forms.CharField(label='Web Platform Tests Description', required=True,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Please link to the <a href="https://wpt.fyi/results">results on wpt.fyi</a>. If any part of the feature is not tested by web-platform-tests, please include links to issues, e.g. a web-platform-tests issue with the "infra" label explaining why a certain thing cannot be tested (<a href="https://github.com/w3c/web-platform-tests/issues/3867">example</a>), a spec issue for some change that would make it possible to test. (<a href="https://github.com/whatwg/fullscreen/issues/70">example</a>), or a Chromium issue to upstream some existing tests (<a href="https://bugs.chromium.org/p/chromium/issues/detail?id=695486">example</a>).')
sample_links = forms.CharField(label='Samples links', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 500}),
help_text='Links to samples (one URL per line).')
bug_url = forms.URLField(required=False, label='Tracking bug URL',
help_text='Tracking bug url (https://bugs.chromium.org/...). This bug should have "Type=Feature" set and be world readable.')
blink_components = forms.ChoiceField(
required=True,
label='Blink component',
help_text='Select the most specific component. If unsure, leave as "%s".' % BlinkComponent.DEFAULT_COMPONENT,
choices=[(x, x) for x in BlinkComponent.fetch_all_components()],
initial=[BlinkComponent.DEFAULT_COMPONENT])
impl_status_chrome = forms.ChoiceField(required=True,
label='Status in Chromium', choices=IMPLEMENTATION_STATUS.items())
#shipped_milestone = PlaceholderCharField(required=False,
# placeholder='First milestone the feature shipped with this status (either enabled by default or experimental)')
shipped_milestone = forms.IntegerField(required=False, label='',
help_text='Desktop:<br/>' + SHIPPED_HELP_TXT)
shipped_android_milestone = forms.IntegerField(required=False, label='',
help_text='Chrome for Android:<br/>' + SHIPPED_HELP_TXT)
shipped_ios_milestone = forms.IntegerField(required=False, label='',
help_text='Chrome for iOS (RARE):<br/>' + SHIPPED_HELP_TXT)
shipped_webview_milestone = forms.IntegerField(required=False, label='',
help_text='Android WebView:<br/>' + SHIPPED_HELP_TXT)
prefixed = forms.BooleanField(required=False, initial=False, label='Prefixed?')
footprint = forms.ChoiceField(label='Technical footprint',
choices=FOOTPRINT_CHOICES.items(), initial=MAJOR_MINOR_NEW_API)
visibility = forms.ChoiceField(
label='Developer visibility',
choices=VISIBILITY_CHOICES.items(),
initial=WARRANTS_ARTICLE,
help_text=('How much press / media / web developer buzz will this '
'feature generate?'))
search_tags = forms.CharField(label='Search tags', required=False,
help_text='Comma separated keywords used only in search')
comments = forms.CharField(label='Comments', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text='Additional comments, caveats, info...')
class Meta:
model = Feature
#exclude = ('shipped_webview_milestone',)
def __init__(self, *args, **keyargs):
super(FeatureForm, self).__init__(*args, **keyargs)
meta = getattr(self, 'Meta', None)
exclude = getattr(meta, 'exclude', [])
for field_name in exclude:
if field_name in self.fields:
del self.fields[field_name]
for field, val in self.fields.iteritems():
if val.required:
self.fields[field].widget.attrs['required'] = 'required'
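# Illustrative note (not in the original code): naming fields in Meta.exclude,
# e.g. exclude = ('shipped_webview_milestone',) as hinted at above, makes the
# loop at the top of __init__ drop those fields from the instantiated form.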
class AppUser(DictModel):
"""Describes a user for whitelisting."""
#user = db.UserProperty(required=True, verbose_name='Google Account')
email = db.EmailProperty(required=True)
#is_admin = db.BooleanProperty(default=False)
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
def list_with_component(l, component):
return [x for x in l if x.id() == component.key().id()]
def list_without_component(l, component):
return [x for x in l if x.id() != component.key().id()]
class FeatureOwner(DictModel):
"""Describes subscribers of a web platform feature."""
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
name = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
twitter = db.StringProperty()
blink_components = db.ListProperty(db.Key)
primary_blink_components = db.ListProperty(db.Key)
watching_all_features = db.BooleanProperty(default=False)
# def __eq__(self, other):
# return self.key().id() == other.key().id()
def add_to_component_subscribers(self, component_name):
"""Adds the user to the list of Blink component subscribers."""
c = BlinkComponent.get_by_name(component_name)
if c:
# Add the user if they're not already in the list.
if not len(list_with_component(self.blink_components, c)):
self.blink_components.append(c.key())
return self.put()
return None
def remove_from_component_subscribers(self, component_name, remove_as_owner=False):
"""Removes the user from the list of Blink component subscribers or as the owner
of the component."""
c = BlinkComponent.get_by_name(component_name)
if c:
if remove_as_owner:
self.primary_blink_components = list_without_component(self.primary_blink_components, c)
else:
self.blink_components = list_without_component(self.blink_components, c)
self.primary_blink_components = list_without_component(self.primary_blink_components, c)
return self.put()
return None
def add_as_component_owner(self, component_name):
"""Adds the user as the Blink component owner."""
c = BlinkComponent.get_by_name(component_name)
if c:
# Update both the primary list and blink components subscribers if the
# user is not already in them.
self.add_to_component_subscribers(component_name)
if not len(list_with_component(self.primary_blink_components, c)):
self.primary_blink_components.append(c.key())
return self.put()
return None
def remove_as_component_owner(self, component_name):
return self.remove_from_component_subscribers(component_name, remove_as_owner=True)
class HistogramModel(db.Model):
"""Container for a histogram."""
bucket_id = db.IntegerProperty(required=True)
property_name = db.StringProperty(required=True)
MAX_CHUNK_SIZE = 500 # max num features to save for each memcache chunk.
@classmethod
def get_property_chunk_memcache_keys(self, property_class, key_prefix):
num_props = len(property_class.all().fetch(limit=None, keys_only=True))
l = list_to_chunks(range(0, num_props), self.MAX_CHUNK_SIZE)
return ['%s|chunk%s' % (key_prefix, i) for i,val in enumerate(l)]
@classmethod
def set_property_chunk_memcache_keys(self, key_prefix, pop_list):
chunks = list_to_chunks(pop_list, self.MAX_CHUNK_SIZE)
vals = []
for i, chunk in enumerate(chunks):
vals.append(('%s|chunk%s' % (key_prefix, i), chunk))
d = dict(vals)
return d
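# Illustrative note (not in the original code): with MAX_CHUNK_SIZE = 500 and a
# hypothetical key_prefix of 'css_props', a population of 1200 items is split
# into the memcache keys 'css_props|chunk0', 'css_props|chunk1' and
# 'css_props|chunk2'.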
@classmethod
def get_all(self):
output = {}
buckets = self.all().fetch(None)
for bucket in buckets:
output[bucket.bucket_id] = bucket.property_name
return output
class CssPropertyHistogram(HistogramModel):
pass
class FeatureObserverHistogram(HistogramModel):
pass
| 40.36664 | 649 | 0.693194 | ["Apache-2.0"] | cwilso/chromium-dashboard | models.py | 51,306 | Python
# -*- coding: utf-8 -*-
import time
from pymongo import MongoClient
from config import MONGO_CONFIG
def get_current_time(format_str: str = '%Y-%m-%d %H:%M:%S'):
"""
Get the current time as a string, formatted like 2020-01-01 00:00:00 by default.
:param format_str: strftime format string
:return:
"""
return time.strftime(format_str, time.localtime())
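# Illustrative usage sketch (not part of the original module): the default
# format yields strings like '2024-05-01 12:30:45'; any strftime format works.
def _demo_get_current_time():
    print(get_current_time())            # e.g. '2024-05-01 12:30:45'
    print(get_current_time('%Y%m%d'))    # e.g. '20240501'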
class MongoDb:
def __init__(self):
"""初始化
初始化 mongo db
"""
mongo_uri = 'mongodb://%s:%s@%s:%s' % (
MONGO_CONFIG['user'],
MONGO_CONFIG['pwd'],
MONGO_CONFIG['host'],
MONGO_CONFIG['port'])
self.mongo = MongoClient(mongo_uri)
self.sogou_db = self.mongo['sogou_dev']
self.sogou_search_col = self.sogou_db['sogou_search_results']
# self.task_db = self.mongo['sogou_tast']
def update_sogou_login_cookie(self, username, cookie):
"""
Update the stored Sogou WeChat login cookie for an account.
:param username: account nickname
:param cookie: raw cookie string captured after login
:return:
"""
col = self.sogou_db['sogou_login_cookies']
ctime = get_current_time()
find_obj = {
'nickname': username,
'is_valid': 1,
}
login_item = col.find_one(find_obj)
print(login_item)
# Insert a new record
if not login_item:
cookie = 'DESC=0; %s' % cookie
col.insert_one({
'cookie': cookie,
'nickname': username,
'device': '0',
'state': 'normal',
'c_time': ctime,
'm_time': ctime,
'is_valid': 1,
'failures': 0,
})
return
# Update the existing record
cookie = 'DESC=%s; %s' % (login_item['device'], cookie)
col.update_one(find_obj, {
'$set': {
'state': 'normal',
'cookie': cookie,
'c_time': ctime,
'm_time': ctime,
'failures': 0,
}
})
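# Illustrative usage sketch (not part of the original module); the account
# nickname and cookie value below are placeholders:
#
#     db = MongoDb()
#     db.update_sogou_login_cookie('example_user', 'SUID=...; SUV=...')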
def insert_sogou_search_result(self, result):
"""
Save a Sogou search result document.
:param result: result document (dict) to insert or update
"""
ctime = get_current_time()
find_obj = {
'id': result['id'],
'is_valid': 1
}
search_item = self.sogou_search_col.find_one(find_obj)
print(search_item)
new_result = result
# Insert a new record
if not search_item:
new_result["c_time"] = ctime
new_result["m_time"] = ctime
new_result["is_valid"] = 1
self.sogou_search_col.insert_one(new_result)
return
# Update the existing record
new_result["m_time"] = ctime
self.sogou_search_col.update_one(find_obj, {
'$set': new_result
})
| 25.551402 | 69 | 0.49305 | ["MIT"] | matiastang/selenium-learning | src/sogou_wechat/mongoDB.py | 2,862 | Python
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitso(Exchange):
def describe(self):
return self.deep_extend(super(bitso, self).describe(), {
'id': 'bitso',
'name': 'Bitso',
'countries': ['MX'], # Mexico
'rateLimit': 2000, # 30 requests per minute
'version': 'v3',
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchDepositAddress': True,
'fetchFundingFee': False,
'fetchFundingFees': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87295554-11f98280-c50e-11ea-80d6-15b3bafa8cbf.jpg',
'api': 'https://api.bitso.com',
'www': 'https://bitso.com',
'doc': 'https://bitso.com/api_info',
'fees': 'https://bitso.com/fees',
'referral': 'https://bitso.com/?ref=itej',
},
'precisionMode': TICK_SIZE,
'options': {
'precision': {
'XRP': 0.000001,
'MXN': 0.01,
'TUSD': 0.01,
},
'defaultPrecision': 0.00000001,
},
'timeframes': {
'1m': '60',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'4h': '14400',
'12h': '43200',
'1d': '86400',
'1w': '604800',
},
'api': {
'public': {
'get': [
'available_books',
'ticker',
'order_book',
'trades',
'ohlc',
],
},
'private': {
'get': [
'account_status',
'balance',
'fees',
'fundings',
'fundings/{fid}',
'funding_destination',
'kyc_documents',
'ledger',
'ledger/trades',
'ledger/fees',
'ledger/fundings',
'ledger/withdrawals',
'mx_bank_codes',
'open_orders',
'order_trades/{oid}',
'orders/{oid}',
'user_trades',
'user_trades/{tid}',
'withdrawals/',
'withdrawals/{wid}',
],
'post': [
'bitcoin_withdrawal',
'debit_card_withdrawal',
'ether_withdrawal',
'ripple_withdrawal',
'bcash_withdrawal',
'litecoin_withdrawal',
'orders',
'phone_number',
'phone_verification',
'phone_withdrawal',
'spei_withdrawal',
'ripple_withdrawal',
'bcash_withdrawal',
'litecoin_withdrawal',
],
'delete': [
'orders/{oid}',
'orders/all',
],
},
},
'exceptions': {
'0201': AuthenticationError, # Invalid Nonce or Invalid Credentials
'104': InvalidNonce, # Cannot perform request - nonce must be higher than 1520307203724237
'0304': BadRequest, # {"success":false,"error":{"code":"0304","message":"The field time_bucket() is either invalid or missing"}}
},
})
def fetch_markets(self, params={}):
response = self.publicGetAvailableBooks(params)
#
# {
# "success":true,
# "payload":[
# {
# "book":"btc_mxn",
# "minimum_price":"500",
# "maximum_price":"10000000",
# "minimum_amount":"0.00005",
# "maximum_amount":"500",
# "minimum_value":"5",
# "maximum_value":"10000000",
# "tick_size":"0.01",
# "fees":{
# "flat_rate":{"maker":"0.500","taker":"0.650"},
# "structure":[
# {"volume":"1500000","maker":"0.00500","taker":"0.00650"},
# {"volume":"2000000","maker":"0.00490","taker":"0.00637"},
# {"volume":"5000000","maker":"0.00480","taker":"0.00624"},
# {"volume":"7000000","maker":"0.00440","taker":"0.00572"},
# {"volume":"10000000","maker":"0.00420","taker":"0.00546"},
# {"volume":"15000000","maker":"0.00400","taker":"0.00520"},
# {"volume":"35000000","maker":"0.00370","taker":"0.00481"},
# {"volume":"50000000","maker":"0.00300","taker":"0.00390"},
# {"volume":"150000000","maker":"0.00200","taker":"0.00260"},
# {"volume":"250000000","maker":"0.00100","taker":"0.00130"},
# {"volume":"9999999999","maker":"0.00000","taker":"0.00130"},
# ]
# }
# },
# ]
# }
markets = self.safe_value(response, 'payload')
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'book')
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
fees = self.safe_value(market, 'fees', {})
flatRate = self.safe_value(fees, 'flat_rate', {})
takerString = self.safe_string(flatRate, 'taker')
makerString = self.safe_string(flatRate, 'maker')
taker = self.parse_number(Precise.string_div(takerString, '100'))
maker = self.parse_number(Precise.string_div(makerString, '100'))
feeTiers = self.safe_value(fees, 'structure', [])
fee = {
'taker': taker,
'maker': maker,
'percentage': True,
'tierBased': True,
}
takerFees = []
makerFees = []
for j in range(0, len(feeTiers)):
tier = feeTiers[j]
volume = self.safe_number(tier, 'volume')
takerFee = self.safe_number(tier, 'taker')
makerFee = self.safe_number(tier, 'maker')
takerFees.append([volume, takerFee])
makerFees.append([volume, makerFee])
if j == 0:
fee['taker'] = takerFee
fee['maker'] = makerFee
tiers = {
'taker': takerFees,
'maker': makerFees,
}
fee['tiers'] = tiers
defaultPricePrecision = self.safe_number(self.options['precision'], quote, self.options['defaultPrecision'])
result.append(self.extend({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': None,
'contract': False,
'linear': None,
'inverse': None,
'taker': taker,
'maker': maker,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(self.options['precision'], base, self.options['defaultPrecision']),
'price': self.safe_number(market, 'tick_size', defaultPricePrecision),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minimum_amount'),
'max': self.safe_number(market, 'maximum_amount'),
},
'price': {
'min': self.safe_number(market, 'minimum_price'),
'max': self.safe_number(market, 'maximum_price'),
},
'cost': {
'min': self.safe_number(market, 'minimum_value'),
'max': self.safe_number(market, 'maximum_value'),
},
},
'info': market,
}, fee))
return result
def parse_balance(self, response):
payload = self.safe_value(response, 'payload', {})
balances = self.safe_value(payload, 'balances')
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'locked')
account['total'] = self.safe_string(balance, 'total')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalance(params)
#
# {
# "success": True,
# "payload": {
# "balances": [
# {
# "currency": "bat",
# "available": "0.00000000",
# "locked": "0.00000000",
# "total": "0.00000000",
# "pending_deposit": "0.00000000",
# "pending_withdrawal": "0.00000000"
# },
# {
# "currency": "bch",
# "available": "0.00000000",
# "locked": "0.00000000",
# "total": "0.00000000",
# "pending_deposit": "0.00000000",
# "pending_withdrawal": "0.00000000"
# },
# ],
# },
# }
#
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'book': self.market_id(symbol),
}
response = self.publicGetOrderBook(self.extend(request, params))
orderbook = self.safe_value(response, 'payload')
timestamp = self.parse8601(self.safe_string(orderbook, 'updated_at'))
return self.parse_order_book(orderbook, symbol, timestamp, 'bids', 'asks', 'price', 'amount')
def parse_ticker(self, ticker, market=None):
#
# {
# "high":"37446.85",
# "last":"36599.54",
# "created_at":"2022-01-28T12:06:11+00:00",
# "book":"btc_usdt",
# "volume":"7.29075419",
# "vwap":"36579.1564400307",
# "low":"35578.52",
# "ask":"36574.76",
# "bid":"36538.22",
# "change_24":"-105.64"
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.parse8601(self.safe_string(ticker, 'created_at'))
vwap = self.safe_string(ticker, 'vwap')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = Precise.string_mul(baseVolume, vwap)
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'book': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
ticker = self.safe_value(response, 'payload')
#
# {
# "success":true,
# "payload":{
# "high":"37446.85",
# "last":"37051.96",
# "created_at":"2022-01-28T17:03:29+00:00",
# "book":"btc_usdt",
# "volume":"6.16176186",
# "vwap":"36582.6293169472",
# "low":"35578.52",
# "ask":"37083.62",
# "bid":"37039.66",
# "change_24":"478.45"
# }
# }
#
return self.parse_ticker(ticker, market)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'book': market['id'],
'time_bucket': self.timeframes[timeframe],
}
if since is not None:
request['start'] = since
if limit is not None:
duration = self.parse_timeframe(timeframe)
request['end'] = self.sum(since, duration * limit * 1000)
elif limit is not None:
now = self.milliseconds()
request['end'] = now
request['start'] = now - self.parse_timeframe(timeframe) * 1000 * limit
response = self.publicGetOhlc(self.extend(request, params))
#
# {
# "success":true,
# "payload": [
# {
# "bucket_start_time":1648219140000,
# "first_trade_time":1648219154990,
# "last_trade_time":1648219189441,
# "first_rate":"44958.60",
# "last_rate":"44979.88",
# "min_rate":"44957.33",
# "max_rate":"44979.88",
# "trade_count":8,
# "volume":"0.00082814",
# "vwap":"44965.02"
# },
# ]
# }
#
payload = self.safe_value(response, 'payload', [])
return self.parse_ohlcvs(payload, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m'):
#
# {
# "bucket_start_time":1648219140000,
# "first_trade_time":1648219154990,
# "last_trade_time":1648219189441,
# "first_rate":"44958.60",
# "last_rate":"44979.88",
# "min_rate":"44957.33",
# "max_rate":"44979.88",
# "trade_count":8,
# "volume":"0.00082814",
# "vwap":"44965.02"
# },
#
return [
self.safe_integer(ohlcv, 'bucket_start_time'),
self.safe_number(ohlcv, 'first_rate'),
self.safe_number(ohlcv, 'max_rate'),
self.safe_number(ohlcv, 'min_rate'),
self.safe_number(ohlcv, 'last_rate'),
self.safe_number(ohlcv, 'volume'),
]
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "book": "btc_usdt",
# "created_at": "2021-11-24T12:14:53+0000",
# "amount": "0.00026562",
# "maker_side": "sell",
# "price": "56471.55",
# "tid": "52557338"
# }
#
# fetchMyTrades(private)
#
# {
# "book": "btc_usdt",
# "created_at": "2021-11-24T12:31:03+0000",
# "minor": "11.30356000",
# "major": "-0.00020000",
# "fees_amount": "0.01119052",
# "fees_currency": "usdt",
# "minor_currency": "usdt",
# "major_currency": "btc",
# "oid": "djTzMIWx2Vi3iMjl",
# "tid": "52559051",
# "price": "56517.80",
# "side": "sell",
# "maker_side": "buy"
# }
#
# fetchOrderTrades(private)
#
# {
# "book": "btc_usdt",
# "created_at": "2021-11-24T12:30:52+0000",
# "minor": "-11.33047916",
# "major": "0.00020020",
# "fees_amount": "0.00000020",
# "fees_currency": "btc",
# "minor_currency": "usdt",
# "major_currency": "btc",
# "oid": "O0D2zcljjjQF5xlG",
# "tid": "52559030",
# "price": "56595.80",
# "side": "buy",
# "maker_side": "sell"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
marketId = self.safe_string(trade, 'book')
symbol = self.safe_symbol(marketId, market, '_')
side = self.safe_string_2(trade, 'side', 'maker_side')
makerSide = self.safe_string(trade, 'maker_side')
takerOrMaker = None
if side == makerSide:
takerOrMaker = 'maker'
else:
takerOrMaker = 'taker'
amount = self.safe_string_2(trade, 'amount', 'major')
if amount is not None:
amount = Precise.string_abs(amount)
fee = None
feeCost = self.safe_string(trade, 'fees_amount')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'fees_currency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
cost = self.safe_string(trade, 'minor')
if cost is not None:
cost = Precise.string_abs(cost)
price = self.safe_string(trade, 'price')
orderId = self.safe_string(trade, 'oid')
id = self.safe_string(trade, 'tid')
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'book': market['id'],
}
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response['payload'], market, since, limit)
def fetch_trading_fees(self, params={}):
self.load_markets()
response = self.privateGetFees(params)
#
# {
# success: True,
# payload: {
# fees: [
# {
# book: 'btc_mxn',
# fee_percent: '0.6500',
# fee_decimal: '0.00650000',
# taker_fee_percent: '0.6500',
# taker_fee_decimal: '0.00650000',
# maker_fee_percent: '0.5000',
# maker_fee_decimal: '0.00500000',
# volume_currency: 'mxn',
# current_volume: '0.00',
# next_volume: '1500000.00',
# next_maker_fee_percent: '0.490',
# next_taker_fee_percent: '0.637',
# nextVolume: '1500000.00',
# nextFee: '0.490',
# nextTakerFee: '0.637'
# },
# ...
# ],
# deposit_fees: [
# {
# currency: 'btc',
# method: 'rewards',
# fee: '0.00',
# is_fixed: False
# },
# ...
# ],
# withdrawal_fees: {
# ada: '0.20958100',
# bch: '0.00009437',
# ars: '0',
# btc: '0.00001209',
# ...
# }
# }
# }
#
payload = self.safe_value(response, 'payload', {})
fees = self.safe_value(payload, 'fees', [])
result = {}
for i in range(0, len(fees)):
fee = fees[i]
marketId = self.safe_string(fee, 'book')
symbol = self.safe_symbol(marketId, None, '_')
result[symbol] = {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(fee, 'maker_fee_decimal'),
'taker': self.safe_number(fee, 'taker_fee_decimal'),
'percentage': True,
'tierBased': True,
}
return result
def fetch_my_trades(self, symbol=None, since=None, limit=25, params={}):
self.load_markets()
market = self.market(symbol)
# they don't support fetching trades starting from a date yet
# use the `marker` extra param for that
# this is not a typo: the parameter name is 'marker' (don't confuse it with 'market')
markerInParams = ('marker' in params)
# warn the user with an exception if the user wants to filter
# starting from since timestamp, but does not set the trade id with an extra 'marker' param
if (since is not None) and not markerInParams:
raise ExchangeError(self.id + ' fetchMyTrades does not support fetching trades starting from a timestamp with the `since` argument, use the `marker` extra param to filter starting from an integer trade id')
# convert it to an integer unconditionally
if markerInParams:
params = self.extend(params, {
'marker': int(params['marker']),
})
request = {
'book': market['id'],
'limit': limit, # default = 25, max = 100
# 'sort': 'desc', # default = desc
# 'marker': id, # integer id to start from
}
response = self.privateGetUserTrades(self.extend(request, params))
return self.parse_trades(response['payload'], market, since, limit)
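# Illustrative usage sketch (not part of ccxt): to page through private trades,
# pass the integer trade id via the 'marker' extra param instead of `since`;
# the symbol and trade id below are placeholders:
#
#     exchange.fetch_my_trades('BTC/MXN', limit=50, params={'marker': 52559051})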
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'book': self.market_id(symbol),
'side': side,
'type': type,
'major': self.amount_to_precision(symbol, amount),
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostOrders(self.extend(request, params))
id = self.safe_string(response['payload'], 'oid')
return {
'info': response,
'id': id,
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'oid': id,
}
return self.privateDeleteOrdersOid(self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'partial-fill': 'open', # this is a common substitution in ccxt
'completed': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'oid')
side = self.safe_string(order, 'side')
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'book')
symbol = self.safe_symbol(marketId, market, '_')
orderType = self.safe_string(order, 'type')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'original_amount')
remaining = self.safe_string(order, 'unfilled_amount')
clientOrderId = self.safe_string(order, 'client_id')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': orderType,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': None,
'remaining': remaining,
'filled': None,
'status': status,
'fee': None,
'average': None,
'trades': None,
}, market)
def fetch_open_orders(self, symbol=None, since=None, limit=25, params={}):
self.load_markets()
market = self.market(symbol)
# they don't support fetching trades starting from a date yet
# use the `marker` extra param for that
# this is not a typo: the parameter name is 'marker' (don't confuse it with 'market')
markerInParams = ('marker' in params)
# warn the user with an exception if the user wants to filter
# starting from since timestamp, but does not set the trade id with an extra 'marker' param
if (since is not None) and not markerInParams:
raise ExchangeError(self.id + ' fetchOpenOrders does not support fetching orders starting from a timestamp with the `since` argument, use the `marker` extra param to filter starting from an integer trade id')
# convert it to an integer unconditionally
if markerInParams:
params = self.extend(params, {
'marker': int(params['marker']),
})
request = {
'book': market['id'],
'limit': limit, # default = 25, max = 100
# 'sort': 'desc', # default = desc
# 'marker': id, # integer id to start from
}
response = self.privateGetOpenOrders(self.extend(request, params))
orders = self.parse_orders(response['payload'], market, since, limit)
return orders
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privateGetOrdersOid({
'oid': id,
})
payload = self.safe_value(response, 'payload')
if isinstance(payload, list):
numOrders = len(response['payload'])
if numOrders == 1:
return self.parse_order(payload[0])
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'oid': id,
}
response = self.privateGetOrderTradesOid(self.extend(request, params))
return self.parse_trades(response['payload'], market)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'fund_currency': currency['id'],
}
response = self.privateGetFundingDestination(self.extend(request, params))
address = self.safe_string(response['payload'], 'account_identifier')
tag = None
if address.find('?dt=') >= 0:
parts = address.split('?dt=')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
def fetch_funding_fees(self, params={}):
self.load_markets()
response = self.privateGetFees(params)
#
# {
# success: True,
# payload: {
# fees: [
# {
# book: 'btc_mxn',
# fee_percent: '0.6500',
# fee_decimal: '0.00650000',
# taker_fee_percent: '0.6500',
# taker_fee_decimal: '0.00650000',
# maker_fee_percent: '0.5000',
# maker_fee_decimal: '0.00500000',
# volume_currency: 'mxn',
# current_volume: '0.00',
# next_volume: '1500000.00',
# next_maker_fee_percent: '0.490',
# next_taker_fee_percent: '0.637',
# nextVolume: '1500000.00',
# nextFee: '0.490',
# nextTakerFee: '0.637'
# },
# ...
# ],
# deposit_fees: [
# {
# currency: 'btc',
# method: 'rewards',
# fee: '0.00',
# is_fixed: False
# },
# ...
# ],
# withdrawal_fees: {
# ada: '0.20958100',
# bch: '0.00009437',
# ars: '0',
# btc: '0.00001209',
# ...
# }
# }
# }
#
payload = self.safe_value(response, 'payload', {})
depositFees = self.safe_value(payload, 'deposit_fees', [])
deposit = {}
for i in range(0, len(depositFees)):
depositFee = depositFees[i]
currencyId = self.safe_string(depositFee, 'currency')
code = self.safe_currency_code(currencyId)
deposit[code] = self.safe_number(depositFee, 'fee')
withdraw = {}
withdrawalFees = self.safe_value(payload, 'withdrawal_fees', [])
currencyIds = list(withdrawalFees.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
withdraw[code] = self.safe_number(withdrawalFees, currencyId)
return {
'info': response,
'deposit': deposit,
'withdraw': withdraw,
}
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
methods = {
'BTC': 'Bitcoin',
'ETH': 'Ether',
'XRP': 'Ripple',
'BCH': 'Bcash',
'LTC': 'Litecoin',
}
currency = self.currency(code)
method = methods[code] if (code in methods) else None
if method is None:
raise ExchangeError(self.id + ' not valid withdraw coin: ' + code)
request = {
'amount': amount,
'address': address,
'destination_tag': tag,
}
classMethod = 'privatePost' + method + 'Withdrawal'
response = getattr(self, classMethod)(self.extend(request, params))
#
# {
# "success": True,
# "payload": [
# {
# "wid": "c5b8d7f0768ee91d3b33bee648318688",
# "status": "pending",
# "created_at": "2016-04-08T17:52:31.000+00:00",
# "currency": "btc",
# "method": "Bitcoin",
# "amount": "0.48650929",
# "details": {
# "withdrawal_address": "18MsnATiNiKLqUHDTRKjurwMg7inCrdNEp",
# "tx_hash": "d4f28394693e9fb5fffcaf730c11f32d1922e5837f76ca82189d3bfe30ded433"
# }
# },
# ]
# }
#
payload = self.safe_value(response, 'payload', [])
first = self.safe_value(payload, 0)
return self.parse_transaction(first, currency)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "wid": "c5b8d7f0768ee91d3b33bee648318688",
# "status": "pending",
# "created_at": "2016-04-08T17:52:31.000+00:00",
# "currency": "btc",
# "method": "Bitcoin",
# "amount": "0.48650929",
# "details": {
# "withdrawal_address": "18MsnATiNiKLqUHDTRKjurwMg7inCrdNEp",
# "tx_hash": "d4f28394693e9fb5fffcaf730c11f32d1922e5837f76ca82189d3bfe30ded433"
# }
# }
#
currency = self.safe_currency(None, currency)
return {
'id': self.safe_string(transaction, 'wid'),
'txid': None,
'timestamp': None,
'datetime': None,
'network': None,
'addressFrom': None,
'address': None,
'addressTo': None,
'amount': None,
'type': None,
'currency': currency['code'],
'status': None,
'updated': None,
'tagFrom': None,
'tag': None,
'tagTo': None,
'comment': None,
'fee': None,
'info': transaction,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
endpoint = '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if method == 'GET':
if query:
endpoint += '?' + self.urlencode(query)
url = self.urls['api'] + endpoint
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
request = ''.join([nonce, method, endpoint])
if method != 'GET':
if query:
body = self.json(query)
request += body
signature = self.hmac(self.encode(request), self.encode(self.secret))
auth = self.apiKey + ':' + nonce + ':' + signature
headers = {
'Authorization': 'Bitso ' + auth,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'success' in response:
#
# {"success":false,"error":{"code":104,"message":"Cannot perform request - nonce must be higher than 1520307203724237"}}
#
success = self.safe_value(response, 'success', False)
if isinstance(success, str):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
feedback = self.id + ' ' + self.json(response)
error = self.safe_value(response, 'error')
if error is None:
raise ExchangeError(feedback)
code = self.safe_string(error, 'code')
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
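# Illustrative sketch (not part of ccxt): the private-endpoint signature built
# by sign() above is an HMAC (SHA-256, ccxt's default digest) over
# nonce + HTTP method + request path + body, hex encoded, and sent as
# "Authorization: Bitso <key>:<nonce>:<signature>". The credentials and
# endpoint below are placeholders.
def _demo_bitso_auth_header():
    import hashlib
    import hmac
    import time
    api_key, api_secret = 'API_KEY', b'API_SECRET'
    nonce = str(int(time.time() * 1000))
    message = nonce + 'GET' + '/v3/balance'  # GET requests have an empty body
    signature = hmac.new(api_secret, message.encode(), hashlib.sha256).hexdigest()
    return {'Authorization': 'Bitso %s:%s:%s' % (api_key, nonce, signature)}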
| 39.967262 | 220 | 0.454315 | ["MIT"] | AsquaredXIV/ccxt | python/ccxt/bitso.py | 40,287 | Python
# list all object store access policies
# (assumes `client` is an authenticated py-pure-client FlashBlade client created
# earlier in the docs, as in the other examples)
import pypureclient  # needed for the ValidResponse type check below
res = client.get_object_store_access_policies()
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items))
# Valid fields: continuation_token, filter, ids, limit, names, offset, sort
# See section "Common Fields" for examples
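# Illustrative sketch (not part of the original example): the same call with a
# couple of the fields listed above; the values are placeholders.
#
#     res = client.get_object_store_access_policies(limit=10, sort='name')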
| 37.375 | 75 | 0.77592 | ["BSD-2-Clause"] | Flav-STOR-WL/py-pure-client | docs/source/examples/FB2.0/get_object_store_access_policies.py | 299 | Python
import logging
import urllib.parse
from typing import Any, Dict, Optional, Type, Union
from globus_sdk import config, exc, utils
from globus_sdk.authorizers import GlobusAuthorizer
from globus_sdk.paging import PaginatorTable
from globus_sdk.response import GlobusHTTPResponse
from globus_sdk.scopes import ScopeBuilder
from globus_sdk.transport import RequestsTransport
log = logging.getLogger(__name__)
class BaseClient:
r"""
Abstract base class for clients with error handling for Globus APIs.
:param authorizer: A ``GlobusAuthorizer`` which will generate Authorization headers
:type authorizer: :class:`GlobusAuthorizer\
<globus_sdk.authorizers.base.GlobusAuthorizer>`
:param app_name: Optional "nice name" for the application. Has no bearing on the
semantics of client actions. It is just passed as part of the User-Agent
string, and may be useful when debugging issues with the Globus Team
:type app_name: str
:param transport_params: Options to pass to the transport for this client
:type transport_params: dict
All other parameters are for internal use and should be ignored.
"""
# service name is used to lookup a service URL from config
service_name: str = "_base"
# path under the client base URL
base_path: str = "/"
#: the class for errors raised by this client on HTTP 4xx and 5xx errors
#: this can be set in subclasses, but must always be a subclass of GlobusError
error_class: Type[exc.GlobusAPIError] = exc.GlobusAPIError
#: the type of Transport which will be used, defaults to ``RequestsTransport``
transport_class: Type[RequestsTransport] = RequestsTransport
#: the scopes for this client may be present as a ``ScopeBuilder``
scopes: Optional[ScopeBuilder] = None
def __init__(
self,
*,
environment: Optional[str] = None,
base_url: Optional[str] = None,
authorizer: Optional[GlobusAuthorizer] = None,
app_name: Optional[str] = None,
transport_params: Optional[Dict[str, Any]] = None,
):
# explicitly check the `service_name` to ensure that it was set
#
# unfortunately, we can't rely on declaring BaseClient as an ABC because it
# doesn't have any abstract methods
#
# if we declare `service_name` without a value, we get AttributeError on access
# instead of the (desired) TypeError when instantiating a BaseClient because
# it's abstract
if self.service_name == "_base":
raise NotImplementedError(
"Cannot instantiate clients which do not set a 'service_name'"
)
log.info(
f'Creating client of type {type(self)} for service "{self.service_name}"'
)
# if an environment was passed, it will be used, but otherwise lookup
# the env var -- and in the special case of `production` translate to
# `default`, regardless of the source of that value
# logs the environment when it isn't `default`
self.environment = config.get_environment_name(environment)
self.transport = self.transport_class(**(transport_params or {}))
log.debug(f"initialized transport of type {type(self.transport)}")
if not self.service_name and not base_url:
raise ValueError("Either service_name or base_url must be set")
self.base_url = utils.slash_join(
config.get_service_url(self.service_name, environment=self.environment)
if base_url is None
else base_url,
self.base_path,
)
self.authorizer = authorizer
# set application name if given
self._app_name = None
if app_name is not None:
self.app_name = app_name
# setup paginated methods
self.paginated = PaginatorTable(self)
@property
def app_name(self) -> Optional[str]:
return self._app_name
@app_name.setter
def app_name(self, value: str) -> None:
self._app_name = self.transport.user_agent = value
@utils.classproperty
def resource_server(cls) -> Optional[str]:
"""
The resource_server name for the API and scopes associated with this client.
This information is pulled from the ``scopes`` attribute of the client class.
If the client does not have associated scopes, this value will be ``None``.
"""
if cls.scopes is None:
return None
return cls.scopes.resource_server
def get(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> GlobusHTTPResponse:
"""
Make a GET request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"GET to {path} with query_params {query_params}")
return self.request("GET", path, query_params=query_params, headers=headers)
def post(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a POST request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"POST to {path} with query_params {query_params}")
return self.request(
"POST",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def delete(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> GlobusHTTPResponse:
"""
Make a DELETE request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"DELETE to {path} with query_params {query_params}")
return self.request("DELETE", path, query_params=query_params, headers=headers)
def put(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a PUT request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"PUT to {path} with query_params {query_params}")
return self.request(
"PUT",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def patch(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a PATCH request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"PATCH to {path} with query_params {query_params}")
return self.request(
"PATCH",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def request(
self,
method: str,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Send an HTTP request
:param method: HTTP request method, as an all caps string
:type method: str
:param path: Path for the request, with or without leading slash
:type path: str
:param query_params: Parameters to be encoded as a query string
:type query_params: dict, optional
:param headers: HTTP headers to add to the request
:type headers: dict
:param data: Data to send as the request body. May pass through encoding.
:type data: dict or str
:param encoding: A way to encode request data. "json", "form", and "text"
are all valid values. Custom encodings can be used only if they are
registered with the transport. By default, strings get "text" behavior and
all other objects get "json".
:type encoding: str
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
# prepare data...
# copy headers if present
rheaders = {**headers} if headers else {}
# if a client is asked to make a request against a full URL, not just the path
# component, then do not resolve the path, simply pass it through as the URL
if path.startswith("https://") or path.startswith("http://"):
url = path
else:
url = utils.slash_join(self.base_url, urllib.parse.quote(path))
# make the request
log.debug("request will hit URL: %s", url)
r = self.transport.request(
method=method,
url=url,
data=data.data if isinstance(data, utils.PayloadWrapper) else data,
query_params=query_params,
headers=rheaders,
encoding=encoding,
authorizer=self.authorizer,
)
log.debug("request made to URL: %s", r.url)
if 200 <= r.status_code < 400:
log.debug(f"request completed with response code: {r.status_code}")
return GlobusHTTPResponse(r, self)
log.debug(f"request completed with (error) response code: {r.status_code}")
raise self.error_class(r)
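# Illustrative sketch (not part of the SDK): concrete clients subclass BaseClient
# and set ``service_name``; the class name, service name, and base URL below are
# hypothetical placeholders.
class _ExampleClient(BaseClient):
    service_name = "example"
def _demo_example_client():
    # Passing base_url skips the service-URL lookup for the made-up service name;
    # the GET below would perform a real HTTP request if executed.
    client = _ExampleClient(base_url="https://api.example.org")
    return client.get("/status", query_params={"verbose": 1})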
| 36.133779 | 87 | 0.623195 | ["ECL-2.0", "Apache-2.0"] | rudyardrichter/globus-sdk-python | src/globus_sdk/client.py | 10,804 | Python
#!/Users/drpaneas/Virtualenvs/linuxed/bin/python2.7
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
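# Illustrative usage note (not part of the original script): run as
#     rst2pseudoxml.py mydoc.rst mydoc.pseudoxml
# reading from stdin and writing to stdout when the file arguments are omitted.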
| 26.416667 | 73 | 0.744479 | ["MIT"] | drpaneas/linuxed.gr | bin/rst2pseudoxml.py | 634 | Python
from django.urls import re_path
from ..views import OptionsExportView, OptionsView
urlpatterns = [
re_path(r'^$', OptionsView.as_view(), name='options'),
re_path(r'^export/(?P<format>[a-z]+)/$', OptionsExportView.as_view(), name='options_export'),
]
| 28.888889 | 97 | 0.703846 | ["Apache-2.0"] | GeoinformationSystems/rdmo | rdmo/options/urls/__init__.py | 260 | Python
"""Identifiers for objects in Robustness Gym."""
from __future__ import annotations
import ast
import json
from typing import Any, Callable, List, Union
# from robustnessgym.core.tools import persistent_hash
class Identifier:
"""Class for creating identifiers for objects in Robustness Gym."""
def __init__(
self,
_name: str,
_index: Union[str, int] = None,
**kwargs,
):
self._name = _name
self._index = str(_index) if _index is not None else None
self._parameters = kwargs
# Add the parameter
for param, value in self.parameters.items():
self.add_parameter(param, value)
@property
def name(self):
"""Base name."""
return self._name
@property
def index(self):
"""Index associated with the identifier."""
return self._index
@property
def parameters(self):
"""Additional parameters contained in the identifier."""
return self._parameters
@classmethod
def range(cls, n: int, _name: str, **kwargs) -> List[Identifier]:
"""Create a list of identifiers, with index varying from 1 to `n`."""
if n > 1:
return [cls(_name=_name, _index=i, **kwargs) for i in range(1, n + 1)]
return [cls(_name=_name, **kwargs)]
def __call__(self, **kwargs):
"""Call the identifier with additional parameters to return a new
identifier."""
ident = Identifier.loads(self.dumps())
for parameter, value in kwargs.items():
ident.add_parameter(parameter, value)
return ident
def __repr__(self):
params = ", ".join([f"{k}={v}" for k, v in self.parameters.items()])
if self.index is not None:
return (
f"{self.name}-{self.index}({params})"
if len(params) > 0
else f"{self.name}-{self.index}"
)
return f"{self.name}({params})" if len(params) > 0 else f"{self.name}"
def __hash__(self):
# return persistent_hash(str(self))
return hash(str(self))
def __eq__(self, other: Union[Identifier, str]):
return str(self) == str(other)
def dumps(self):
"""Dump the identifier to JSON."""
return json.dumps(self.__dict__)
@staticmethod
def _parse_args(s: str):
"""https://stackoverflow.com/questions/49723047/parsing-a-string-as-a-
python-argument-list."""
args = "f({})".format(s)
tree = ast.parse(args)
funccall = tree.body[0].value
# return {arg.arg: ast.literal_eval(arg.value) for arg in funccall.keywords}
params = {}
for arg in funccall.keywords:
try:
params[arg.arg] = ast.literal_eval(arg.value)
except ValueError:
params[arg.arg] = arg.value.id
return params
@classmethod
def parse(cls, s: str) -> Identifier:
"""Parse in an identifier from string."""
# Parse out the various components
if "(" in s:
name_index, params = s.split("(")
params = params.split(")")[0]
else:
name_index = s
params = None
# Create the name and index
if "-" in name_index:
name, index = name_index.split("-")[:-1], name_index.split("-")[-1]
name = "-".join(name)
if index.isnumeric():
index = int(index)
else:
name = "-".join([name, index])
index = None
else:
name = name_index
index = None
# Parse out the params
if params is not None:
params = cls._parse_args(params)
else:
params = {}
return cls(_name=name, _index=index, **params)
def without(self, *params) -> Identifier:
"""Returns an identifier without `params`."""
return Identifier(
self.name,
self.index,
**{k: v for k, v in self.parameters.items() if k not in set(params)},
)
@classmethod
def loads(cls, s: str):
"""Load the identifier from JSON."""
identifier = Identifier(_name="")
identifier.__dict__ = json.loads(s)
return identifier
def add_parameter(self, parameter: str, value: Any) -> None:
"""Add a parameter to the identifier."""
if isinstance(value, Callable):
self.parameters[parameter] = ".".join(
[str(value.__module__), str(value.__name__)]
)
else:
self.parameters[parameter] = value
# Assign Id as an alias for the Identifier class
Id = Identifier
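# Illustrative usage sketch (not part of the original module); the identifier
# name and parameters below are hypothetical.
if __name__ == "__main__":
    ident = Identifier(_name="gpt", _index=1, split="train")
    print(ident)  # -> gpt-1(split=train)
    # The string form round-trips through parse().
    assert Identifier.parse(str(ident)) == ident
    # Parameters can be stripped with without().
    assert ident.without("split") == Identifier(_name="gpt", _index=1)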
| 30.25 | 84 | 0.559229 | [
"Apache-2.0"
] | ANarayan/robustness-gym | robustnessgym/core/identifier.py | 4,719 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAbind(RPackage):
"""
Combine Multidimensional Arrays.
Combine multidimensional arrays into a single array. This is a
generalization of 'cbind' and 'rbind'. Works with vectors, matrices, and
higher-dimensional arrays. Also provides functions 'adrop', 'asub', and
'afill' for manipulating, extracting and replacing data in arrays."""
cran = "abind"
version('1.4-5', sha256='3a3ace5afbcb86e56889efcebf3bf5c3bb042a282ba7cc4412d450bb246a3f2c')
version('1.4-3', sha256='b6c255878c1ab81701ae701f34546e88be115629b984ac4272e311fa3c0ea6ce')
depends_on('[email protected]:', type=('build', 'run'))
| 35.333333 | 95 | 0.742925 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | FJ-NaokiMatsumura/spack | var/spack/repos/builtin/packages/r-abind/package.py | 848 | Python |
import numpy as np
import torch
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
''' Sinusoid position encoding table '''
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
return torch.FloatTensor(sinusoid_table) | 33.391304 | 89 | 0.684896 | [
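# Minimal usage sketch (not part of the original module); the sizes below are
# arbitrary and chosen only to illustrate the call signature.
if __name__ == "__main__":
    table = get_sinusoid_encoding_table(n_position=50, d_hid=16, padding_idx=0)
    assert table.shape == (50, 16)   # one encoding vector per position
    assert torch.all(table[0] == 0)  # the padding position is zeroed out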
"MIT"
] | MrSchnappi/RL-for-Question-Generation | src/onqg/utils/sinusoid.py | 768 | Python |
from unittest import TestCase
# Imported at module level so the bare name is visible inside the test methods.
from numbers2words import numbers2words
class TestNumbers2Words(TestCase):
# 1-10
def test_one(self):
self.assertEqual(numbers2words(1), "one")
def test_two(self):
self.assertEqual(numbers2words(2), "two")
def test_three(self):
self.assertEqual(numbers2words(3), "three")
def test_four(self):
self.assertEqual(numbers2words(4), "four")
def test_five(self):
self.assertEqual(numbers2words(5), "five")
def test_six(self):
self.assertEqual(numbers2words(6), "six")
def test_seven(self):
self.assertEqual(numbers2words(7), "seven")
def test_eight(self):
self.assertEqual(numbers2words(8), "eight")
def test_nine(self):
self.assertEqual(numbers2words(9), "nine")
def test_ten(self):
self.assertEqual(numbers2words(10), "ten")
# 11-20
def test_eleven(self):
self.assertEqual(numbers2words(11), "eleven")
def test_twelve(self):
self.assertEqual(numbers2words(12), "twelve")
def test_thirteen(self):
self.assertEqual(numbers2words(13), "thirteen")
def test_fourteen(self):
self.assertEqual(numbers2words(14), "fourteen")
def test_fifteen(self):
self.assertEqual(numbers2words(15), "fifteen")
def test_sixteen(self):
self.assertEqual(numbers2words(16), "sixteen")
def test_seventeen(self):
self.assertEqual(numbers2words(17), "seventeen")
def test_eighteen(self):
self.assertEqual(numbers2words(18), "eighteen")
def test_nineteen(self):
self.assertEqual(numbers2words(19), "nineteen")
def test_twenty(self):
self.assertEqual(numbers2words(20), "twenty")
| 23.214286 | 52 | 0.710154 | [
"MIT"
] | gnuchu/n2w | tests/test_n2w.py | 1,625 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
import tensorflow_model_optimization as tfmot
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch,
current_batch,
steps_per_epoch,
batch_size):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.
Returns:
Adjusted learning rate.
"""
initial_lr = BASE_LEARNING_RATE * batch_size / 256
epoch = current_epoch + float(current_batch) / steps_per_epoch
warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
if epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
for mult, start_epoch in LR_SCHEDULE:
if epoch >= start_epoch:
learning_rate = initial_lr * mult
else:
break
return learning_rate
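# Worked example of the schedule above (illustrative, assuming batch_size=256 so
# the rescaled base rate equals BASE_LEARNING_RATE):
#   current_epoch=2,  current_batch=0 -> warmup:               0.1 * 1.0 * 2 / 5 = 0.04
#   current_epoch=35, current_batch=0 -> past the 30 boundary: 0.1 * 0.1         = 0.01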
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
N.B. Only support Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * steps_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
# In an eager function or graph, the current implementation of optimizer
# repeatedly call and thus create ops for the learning rate schedule. To
# avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
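# Illustrative construction (hypothetical values): with batch_size=256,
# warmup_epochs=5, boundaries=[30, 60, 80] and
# multipliers=[1.0, 0.1, 0.01, 0.001], the schedule warms up linearly to the
# rescaled base rate (0.1 at batch size 256) over the first 5 epochs and then
# drops by the listed multipliers at the given epoch boundaries.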
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(
steps_per_epoch,
learning_rate_schedule_fn=None,
pruning_method=None,
enable_checkpoint_and_export=False,
model_dir=None):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard,
steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = pruning_method is not None
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if model_dir is not None:
callbacks.append(tfmot.sparsity.keras.PruningSummaries(
log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if model_dir is not None:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
def define_keras_flags(
dynamic_loss_scale=True,
model=False,
optimizer=False,
pretrained_filepath=False):
"""Define flags for Keras models."""
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
train_epochs=True, epochs_between_evals=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
loss_scale=True,
fp16_implementation=True,
tf_data_experimental_slack=True,
enable_xla=True,
force_v2_in_keras_compile=True,
training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
# TODO(b/135607288): Remove this flag once we understand the root cause of
# slowdown when setting the learning phase in Keras backend.
flags.DEFINE_boolean(
name='set_learning_phase_to_train', default=True,
help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(
name='explicit_gpu_placement', default=False,
help='If not using distribution strategy, explicitly set device scope '
'for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. This flag will be '
'ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of global steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(
name='enable_checkpoint_and_export', default=False,
help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(
name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(
name='steps_per_loop',
default=500,
help='Number of steps per training loop. Only training step happens '
'inside the loop. Callbacks will not be called inside. Will be capped at '
'steps per epoch.')
flags.DEFINE_boolean(
name='use_tf_while_loop',
default=True,
help='Whether to build a tf.while_loop inside the training loop on the '
'host. Setting it to True is critical to have peak performance on '
'TPU.')
flags.DEFINE_boolean(
name='use_tf_keras_layers', default=False,
help='Whether to use tf.keras.layers instead of tf.python.keras.layers.'
'It only changes imagenet resnet model layers for now. This flag is '
      'a temporary flag during transition to tf.keras.layers. Do not use this '
'flag for external usage. this will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5',
'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default',
'Name of optimizer preset. '
'(mobilenet_default, resnet50_default)')
# TODO(kimjaehong): Replace as general hyper-params not only for mobilenet.
flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007,
'Initial value of learning rate per sample for '
'mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94,
'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5,
'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '',
'Pretrained file path.')
def get_synth_data(height, width, num_channels, num_classes, dtype):
"""Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
"""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
return inputs, labels
def define_pruning_flags():
"""Define flags for pruning methods."""
flags.DEFINE_string('pruning_method', None,
'Pruning method.'
'None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0,
'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5,
'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0,
'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000,
'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100,
'Frequency for pruning.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
inputs, labels = get_synth_data(height=height,
width=width,
num_channels=num_channels,
num_classes=num_classes,
dtype=dtype)
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
"""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
| 41.699588 | 88 | 0.673937 | [
"Apache-2.0"
] | Anku5hk/models | official/vision/image_classification/common.py | 20,266 | Python |
import collections
import itertools
from trie_class import Trie
import sys
import timeit
def load_dataset(filename):
dataset = [sorted(int(n) for n in i.strip().split())
for i in open(filename).readlines()]
size = len(dataset)
print('Size of the Dataset : ', size)
total_len = 0
for i in range(len(dataset)):
total_len = total_len + len(dataset[i])
avg_len = total_len / size
print('Average Transaction Length : ', avg_len)
# print(dataset)
return dataset
def find_frequent_1_itemsets(dataset, min_sup):
# print('1 - item func : min sup :', min_sup)
frequency = dict(collections.Counter(
itertools.chain.from_iterable(dataset)))
L1 = dict()
for item, freq in frequency.items():
if freq >= min_sup:
L1[item] = freq
# print(L1)
return L1
# Input : L_k (k : itemset size)
def apriori_gen(L: list, k):
# Self Join Step
L_next = list()
for l1 in L:
for l2 in L:
if len(set(l1) & set(l2)) == (k - 1):
L_next.append(sorted(list(set(l1) | set(l2))))
# Removing Duplicates
L_set = set(tuple(x) for x in L_next)
L_k1 = [list(x) for x in L_set]
L_k1.sort(key=lambda x: L_next.index(x))
L_k1_tuple = [tuple(i) for i in L_k1]
info={'join':len(L_k1)}
# Prune Step
for c in L_k1_tuple:
if has_infrequent_subset(c, L):
L_k1.remove(list(c))
info['prune'] = len(L_k1)
# Returns list of lists [L_k + 1]
return info,L_k1
def has_infrequent_subset(candidate: tuple, L: list):
for subset in list(itertools.combinations(candidate, len(candidate) - 1)):
if list(subset) not in L:
return True
return False
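# Illustrative trace of apriori_gen on hypothetical 2-itemsets (k = 2):
#   L = [[1, 2], [1, 3], [2, 3], [2, 4]]
#   join step  -> candidates [1, 2, 3], [1, 2, 4], [2, 3, 4]
#   prune step -> [1, 2, 4] and [2, 3, 4] are dropped because their subsets
#                 [1, 4] and [3, 4] are not in L, leaving [[1, 2, 3]].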
def apriori(db: list, min_sup):
min_sup = (len(db) * min_sup) // 100
# print('Apriori - min sup :', min_sup)
levels = list()
Levels_info = list()
L1 = find_frequent_1_itemsets(db, min_sup)
# print('L-1 :', L1)
    if not L1:
print('No 1-Itemset Satisfies Given Minimum Support Threshold')
return None
# Creating list of 1-itemset(list itself)
_L1 = [[k] for k in L1.keys()]
_L1 = sorted(_L1)
# print('L1 :', L1)
levels.append(_L1)
Levels_info.append({'join': len(_L1), 'prune': len(_L1)})
# print('Levels :', levels)
while True:
info,candidates = apriori_gen(levels[-1], len(levels[-1][0]))
trie = Trie(db)
trie.build_trie(candidates)
trie.assign_frequency()
L = list()
# print('Func : Min Sup -', min_sup)
for itemset in candidates:
# print(itemset, trie.get_candidate_freq(itemset), trie.get_candidate_freq(itemset) >= min_sup)
if trie.get_candidate_freq(itemset) >= min_sup:
# print(itemset, trie.get_candidate_freq(itemset), trie.get_candidate_freq(itemset) >= min_sup)
L.append(sorted(itemset))
if not L:
break
levels.append(L)
Levels_info.append(info)
return Levels_info,levels
if __name__ == "__main__":
db = load_dataset(str(sys.argv[1]))
min_sup = float(sys.argv[2])
print('Dataset :', str(sys.argv[1]))
print('Min Support :', min_sup, '%')
print('Min Support Count :', (len(db) * min_sup) // 100)
start = timeit.default_timer()
info, L = apriori(db, min_sup)
stop = timeit.default_timer()
pattern_total,join_total,prune_total = 0,0,0
print('Level', ' After Join', ' After Pruning', ' Frequent Itemsets')
if L is not None:
for i in range(len(L)):
print()
print((i + 1), info[i]['join'], info[i]['prune'], len(L[i]), sep='\t\t')
pattern_total+=len(L[i])
join_total+=info[i]['join']
prune_total+= info[i]['prune']
# print((i + 1), '- Frequent Itemsets :', L[i])
print('\nTotal', join_total, prune_total, pattern_total, sep='\t\t')
print('\nTime: ', stop - start, " seconds")
| 25.871795 | 111 | 0.588206 | [
"MIT"
] | saif-mahmud/Data-Mining-Lab | Frequent Pattern Mining/apriori.py | 4,036 | Python |
"""Spamming Module
{i}spam <no of msgs> <msg>
Note: Don't use it too much."""
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
from asyncio import wait
from telethon import events
@ItzSjDude(outgoing=True, pattern=r"spam")
async def spammer(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
message = e.text
counter = int(message[6:8])
spam_message = str(e.text[8:])
await wait(
[e.respond(spam_message) for i in range(counter)]
)
await e.delete()
if LOGGER:
await e.client.send_message(
LOGGER_GROUP,
"#SPAM \n\n"
"Spam was executed successfully"
)
| 25.527778 | 79 | 0.557127 | [
"Apache-2.0"
] | xditya/PikaBotPlugins | plugins/spam.py | 919 | Python |
# stdlib
import json
from typing import List
from typing import NoReturn
from typing import Optional
# third party
from fastapi import APIRouter
from fastapi import Depends
from fastapi import File
from fastapi import Form
from fastapi import UploadFile
from loguru import logger
from starlette import status
from starlette.exceptions import HTTPException
# grid absolute
from grid.api.dependencies.current_user import get_current_user
from grid.api.users.models import ApplicantStatus
from grid.api.users.models import User
from grid.api.users.models import UserCandidate
from grid.api.users.models import UserCreate
from grid.api.users.models import UserPrivate
from grid.api.users.models import UserUpdate
# relative
from . import syft as syft_user_messages
def raise_generic_private_error() -> NoReturn:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="There was an error processing your request.",
)
router = APIRouter()
@router.get("/me", response_model=User, name="users:me", status_code=status.HTTP_200_OK)
def get_self(current_user: UserPrivate = Depends(get_current_user)) -> User:
return current_user
# TODO: Syft should return the newly created user and the response model should be User.
@router.post("", name="users:create", status_code=status.HTTP_201_CREATED)
async def create_user_grid(
current_user: UserPrivate = Depends(get_current_user),
new_user: str = Form(...),
file: Optional[UploadFile] = File(None),
) -> str:
if file:
pdf_file = file.file.read() # type: ignore
else:
pdf_file = b""
dict_user = json.loads(new_user)
dict_user["daa_pdf"] = pdf_file
user_schema = UserCreate(**dict_user)
try:
return syft_user_messages.create_user(user_schema, current_user)
except Exception as err:
logger.error(err)
raise_generic_private_error()
@router.get("/applicants", name="users:applicants", status_code=status.HTTP_201_CREATED)
async def get_all_candidates(
current_user: UserPrivate = Depends(get_current_user),
) -> List[UserCandidate]:
try:
return syft_user_messages.get_user_requests(current_user)
except Exception as err:
logger.error(err)
raise_generic_private_error()
@router.patch(
"/applicants/{candidate_id}",
name="users:applicants:process",
status_code=status.HTTP_201_CREATED,
)
async def process_applicant_request(
candidate_id: int,
request_status: ApplicantStatus,
current_user: UserPrivate = Depends(get_current_user),
) -> str:
try:
return syft_user_messages.process_applicant_request(
current_user=current_user,
candidate_id=candidate_id,
status=request_status.status,
)
except Exception as err:
logger.error(err)
raise_generic_private_error()
@router.get(
"",
response_model=List[User],
name="users:read_all",
status_code=status.HTTP_200_OK,
)
async def get_all_users_grid(
current_user: UserPrivate = Depends(get_current_user),
) -> List[User]:
try:
return syft_user_messages.get_all_users(current_user)
except Exception as err:
logger.error(err)
raise_generic_private_error()
@router.get(
"/{user_id}",
response_model=User,
name="users:read_one",
status_code=status.HTTP_200_OK,
)
async def get_user_grid(
user_id: int, current_user: UserPrivate = Depends(get_current_user)
) -> User:
try:
return syft_user_messages.get_user(user_id, current_user)
except Exception as err:
logger.error(err)
raise_generic_private_error()
@router.patch(
"/{user_id}",
name="users:update",
status_code=status.HTTP_204_NO_CONTENT,
)
async def update_user_grid(
user_id: int,
updated_user: UserUpdate,
current_user: UserPrivate = Depends(get_current_user),
) -> None:
try:
syft_user_messages.update_user(user_id, current_user, updated_user)
except Exception as err:
logger.error(err)
raise_generic_private_error()
@router.delete(
"/{user_id}", name="users:delete", status_code=status.HTTP_204_NO_CONTENT
)
async def delete_user_grid(
user_id: int, current_user: UserPrivate = Depends(get_current_user)
) -> None:
try:
syft_user_messages.delete_user(user_id, current_user)
except Exception as err:
logger.error(err)
raise_generic_private_error()
| 28.069182 | 88 | 0.725745 | [
"Apache-2.0"
] | BearerPipelineTest/PySyft | packages/grid/backend/grid/api/users/routes.py | 4,463 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../../yara_scanner'))
# -- Project information -----------------------------------------------------
project = 'yara-scanner'
copyright = '2020, John Davison'
author = 'John Davison'
# The full version, including alpha/beta/rc tags
release = '1.0.14'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# Both the class’ and the __init__ method’s docstring are concatenated and inserted.
autoclass_content = 'both'
autodoc_inherit_docstrings = False
| 33.630769 | 84 | 0.674291 | [
"Apache-2.0"
] | 2d4d/yara_scanner | docs/source/conf.py | 2,190 | Python |
import json
import os
import numpy as np
import tensorflow.compat.v1 as tf
import argparse
from tqdm import tqdm
import model
from encode_bpe import BPEEncoder_ja
if int(tf.__version__[0]) > 1:
from model import HParams as HParams
else:
from tensorflow.contrib.training import HParams
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='gpt2ja-medium')
parser.add_argument('--context', type=str, required=True)
parser.add_argument('--gpu', type=str, default='0')
args = parser.parse_args()
with open('ja-bpe.txt', encoding='utf-8') as f:
bpe = f.read().split('\n')
with open('emoji.json', encoding='utf-8') as f:
emoji = json.loads(f.read())
enc = BPEEncoder_ja(bpe, emoji)
n_vocab = len(enc)
if os.path.isfile(args.model+'/hparams.json'):
with open(args.model+'/hparams.json', encoding='utf-8') as f:
params = json.loads(f.read())
hparams = HParams(**params)
elif 'small' in args.model:
hparams = HParams(**{
"n_vocab": n_vocab,
"n_ctx": 1024,
"n_embd": 768,
"n_head": 12,
"n_layer": 12
})
elif 'medium' in args.model:
hparams = HParams(**{
"n_vocab": n_vocab,
"n_ctx": 1024,
"n_embd": 1024,
"n_head": 16,
"n_layer": 24
})
elif 'large' in args.model:
hparams = HParams(**{
"n_vocab": n_vocab,
"n_ctx": 1024,
"n_embd": 1280,
"n_head": 20,
"n_layer": 36
})
else:
raise ValueError('invalid model name.')
config = tf.ConfigProto()
if int(args.gpu) >= 0:
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = args.gpu
with tf.Session(config=config,graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [1, None])
output = model.model(hparams=hparams, X=context, past=None, reuse=tf.AUTO_REUSE)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(args.model)
saver.restore(sess, ckpt)
context_tokens = enc.encode(args.context)
out = sess.run(output, feed_dict={
context: [context_tokens]
})
output = out['h_flat'][-1]
print(output.tolist())
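# Example invocation (model directory and input text are placeholders):
#   python gpt2-transform.py --model gpt2ja-medium --context "<input text>" --gpu 0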
| 26.949367 | 84 | 0.648661 | [
"MIT"
] | july-language/gpt2-japanese | gpt2-transform.py | 2,129 | Python |
from pathlib import Path
from datetime import datetime, timedelta
from src.settings import envs
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from airflow.hooks.postgres_hook import PostgresHook
import logging
from src.settings import log_config
import shutil
# Setting up module from __file__ as the interpreter sets __name__ as __main__ when the source file is executed as
# main program
logger = logging.getLogger(name=__file__.replace(envs.PROJECT_ROOT, '').replace('/', '.')[1:-3])
# these args will get passed on to each operator
# you can override them on a per-task basis during operator initialization
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': days_ago(7),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=5),
'catchup': False
}
DAG_ID = '{p.parent.name}_{p.stem}'.format(p=Path(__file__))
PARAMS = Variable.get(DAG_ID, deserialize_json=True)
SCHEDULE_INTERVAL = PARAMS.get('schedule_interval') or None
DAYS_TO_RETAIN = PARAMS.get('days_to_retain', 60)
TABLES = ("xcom", "task_instance", "sla_miss", "log", "dag_run", "task_fail", "task_reschedule")
LOG_DIR = '/usr/local/airflow/logs'
dag = DAG(
DAG_ID,
default_args=default_args,
description='BT Bill Clean Up DAGs Metadata and Logs',
schedule_interval=SCHEDULE_INTERVAL,
max_active_runs=1
)
def clean_dags_logs():
hook = PostgresHook(postgres_conn_id="airflow_postgres")
dag_files = Path(Path(__file__).parent).glob('*.py')
dags = ['{p.parent.name}_{p.stem}'.format(p=p) for p in dag_files]
dags = [d for d in dags if d != DAG_ID]
execution_date = datetime.date(datetime.now()) - timedelta(days=DAYS_TO_RETAIN)
p = Path(LOG_DIR)
for d in dags:
logger.info("Cleaning up meta tables for {}".format(d))
for t in TABLES:
sql = "delete from {} where dag_id='{}' and execution_date < '{}'".format(t, d, execution_date)
hook.run(sql, True)
logger.info('Cleaning up log folder for {}'.format(d))
for path in list(p.glob('{}/*/*'.format(d))):
log_date = str(path).split('/')[-1]
log_date = log_date.split('T')[0]
log_date = datetime.date(datetime.strptime(log_date, '%Y-%m-%d'))
if log_date < execution_date:
logger.info('Deleting dir {}'.format(str(path.absolute())))
shutil.rmtree(str(path.absolute()))
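# Example of the statement issued per (table, dag) pair, with hypothetical
# dag id and cutoff date:
#   delete from xcom where dag_id='btb_example_dag' and execution_date < '2020-01-01'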
clean_up = PythonOperator(
task_id='clean_up',
python_callable=clean_dags_logs,
dag=dag)
dag >> clean_up
| 36.052632 | 114 | 0.685766 | [
"Apache-2.0"
] | Mahe1980/btb | src/airflow_dags/dags/btb/house_keeping.py | 2,740 | Python |
from abc import ABC
from asyncio import Lock as AsyncLock
from collections import ChainMap, OrderedDict
from dataclasses import dataclass, field
from datetime import timedelta
from functools import partial, wraps
from hashlib import sha256
import inspect
from pathlib import Path
import pickle
from sqlite3 import connect, Connection
from textwrap import dedent
from time import time
from threading import Lock as SyncLock
from typing import Any, Callable, Hashable, Mapping, Optional, Tuple, Type, Union
from weakref import finalize, WeakSet
Decoratee = Union[Callable, Type]
Keygen = Callable[..., Any]
class Pickler(ABC):
@staticmethod
def dumps(_str: str) -> str:
... # pragma: no cover
@staticmethod
def loads(_bytes: bytes) -> Any:
... # pragma: no cover
class _MemoZeroValue:
pass
@dataclass
class _MemoReturnState:
called: bool = False
raised: bool = False
value: Any = _MemoZeroValue
@dataclass(frozen=True)
class _MemoBase:
t0: Optional[float]
memo_return_state: _MemoReturnState = field(init=False, default_factory=_MemoReturnState)
@dataclass(frozen=True)
class _AsyncMemo(_MemoBase):
async_lock: AsyncLock = field(init=False, default_factory=lambda: AsyncLock())
@dataclass(frozen=True)
class _SyncMemo(_MemoBase):
sync_lock: SyncLock = field(init=False, default_factory=lambda: SyncLock())
_Memo = Union[_AsyncMemo, _SyncMemo]
@dataclass(frozen=True)
class _MemoizeBase:
db: Optional[Connection]
default_kwargs: Mapping[str, Any]
duration: Optional[timedelta]
fn: Callable
keygen: Optional[Keygen]
pickler: Pickler = field(hash=False)
size: Optional[int]
expire_order: OrderedDict = field(init=False, default_factory=OrderedDict, hash=False)
memos: OrderedDict = field(init=False, default_factory=OrderedDict, hash=False)
def __post_init__(self) -> None:
if self.db is not None:
self.db.isolation_level = None
self.db.execute(dedent(f'''
CREATE TABLE IF NOT EXISTS `{self.table_name}` (
k TEXT PRIMARY KEY,
t0 FLOAT,
t FLOAT,
v TEXT NOT NULL
)
'''))
if self.duration:
self.db.execute(dedent(f'''
DELETE FROM `{self.table_name}`
WHERE t0 < {time() - self.duration.total_seconds()}
'''))
if self.size:
res = self.db.execute(
f"SELECT t FROM `{self.table_name}` ORDER BY t DESC LIMIT {self.size}"
).fetchall()
if res:
(min_t,) = res[-1]
self.db.execute(f"DELETE FROM `{self.table_name}` WHERE t < {min_t}")
for k, t0, t, v in self.db.execute(
f"SELECT k, t0, t, v FROM `{self.table_name}` ORDER BY t"
).fetchall():
memo = self.make_memo(t0=t0)
memo.memo_return_state.called = True
memo.memo_return_state.value = self.pickler.loads(v)
self.memos[k] = memo
if self.duration:
for k, t0 in self.db.execute(
f"SELECT k, t0 FROM `{self.table_name}` ORDER BY t0"
).fetchall():
self.expire_order[k] = ...
def __len__(self) -> int:
return len(self.memos)
@property
def table_name(self) -> str:
# noinspection PyUnresolvedReferences
return (
f'{self.fn.__code__.co_filename}'
f':{self.fn.__code__.co_name}'
f':{self.fn.__code__.co_firstlineno}'
)
def bind_key_lifetime(self, raw_key: Tuple[Any, ...], key: Union[int, str]) -> None:
for raw_key_part in raw_key:
if (raw_key_part is not None) and (type(raw_key_part).__hash__ is object.__hash__):
finalize(raw_key_part, self.reset_key, key)
def default_keygen(self, *args, **kwargs) -> Tuple[Hashable, ...]:
"""Returns all params (args, kwargs, and missing default kwargs) for function as kwargs."""
return tuple(self.get_args_as_kwargs(*args, **kwargs).values())
def get_args_as_kwargs(self, *args, **kwargs) -> Mapping[str, Any]:
args_as_kwargs = {}
for k, v in zip(self.default_kwargs, args):
args_as_kwargs[k] = v
return ChainMap(args_as_kwargs, kwargs, self.default_kwargs)
def get_memo(self, key: Union[int, str], insert: bool) -> Optional[_Memo]:
try:
memo = self.memos[key] = self.memos.pop(key)
if self.duration is not None and memo.t0 < time() - self.duration.total_seconds():
self.expire_order.pop(key)
raise ValueError('value expired')
except (KeyError, ValueError):
if not insert:
return None
elif self.duration is None:
t0 = None
else:
t0 = time()
# The value has no significance. We're using the dict entirely for ordering keys.
self.expire_order[key] = ...
memo = self.memos[key] = self.make_memo(t0=t0)
return memo
def expire_one_memo(self) -> None:
k = None
if (
(self.expire_order is not None) and
(len(self.expire_order) > 0) and
(
self.memos[next(iter(self.expire_order))].t0 <
time() - self.duration.total_seconds()
)
):
(k, _) = self.expire_order.popitem(last=False)
self.memos.pop(k)
elif self.size is not None and self.size < len(self.memos):
(k, _) = self.memos.popitem(last=False)
if (self.db is not None) and (k is not None):
self.db.execute(f"DELETE FROM `{self.table_name}` WHERE k = '{k}'")
def finalize_memo(self, memo: _Memo, key: Union[int, str]) -> Any:
if memo.memo_return_state.raised:
raise memo.memo_return_state.value
elif (self.db is not None) and (self.memos[key] is memo):
value = self.pickler.dumps(memo.memo_return_state.value)
self.db.execute(
dedent(f'''
INSERT OR REPLACE INTO `{self.table_name}`
(k, t0, t, v)
VALUES
(?, ?, ?, ?)
'''),
(
key,
memo.t0,
time(),
value
)
)
return memo.memo_return_state.value
def get_key(self, raw_key: Tuple[Hashable, ...]) -> Union[int, str]:
if self.db is None:
key = hash(raw_key)
else:
key = sha256(str(raw_key).encode()).hexdigest()
return key
@staticmethod
def make_memo(t0: Optional[float]) -> _Memo: # pragma: no cover
        raise NotImplementedError
def reset(self) -> None:
object.__setattr__(self, 'expire_order', OrderedDict())
object.__setattr__(self, 'memos', OrderedDict())
if self.db is not None:
self.db.execute(f"DELETE FROM `{self.table_name}`")
def reset_key(self, key: Union[int, str]) -> None:
if key in self.memos:
self.memos.pop(key)
if self.duration is not None:
self.expire_order.pop(key)
if self.db is not None:
self.db.execute(f"DELETE FROM `{self.table_name}` WHERE k == '{key}'")
@dataclass(frozen=True)
class _AsyncMemoize(_MemoizeBase):
async def get_raw_key(self, *args, **kwargs) -> Tuple[Hashable, ...]:
if self.keygen is None:
raw_key = self.default_keygen(*args, **kwargs)
else:
raw_key = self.keygen(**self.get_args_as_kwargs(*args, **kwargs))
if isinstance(raw_key, tuple):
raw_key = list(raw_key)
else:
raw_key = [raw_key]
for i, v in enumerate(raw_key):
if inspect.isawaitable(v):
raw_key[i] = await v
raw_key = tuple(raw_key)
return raw_key
def get_behavior(self, *, insert: bool, update: bool) -> Callable:
def get_call(*, fn: Callable) -> Callable:
@wraps(self.fn)
async def call(*args, **kwargs) -> Any:
raw_key = await self.get_raw_key(*args, **kwargs)
key = self.get_key(raw_key)
memo: _AsyncMemo = self.get_memo(key, insert=insert)
if memo is None:
return await fn(*args, **kwargs)
self.expire_one_memo()
async with memo.async_lock:
if (
(insert and not memo.memo_return_state.called) or
(update and memo.memo_return_state.value is not _MemoZeroValue)
):
memo.memo_return_state.called = True
try:
memo.memo_return_state.value = await fn(*args, **kwargs)
except Exception as e:
memo.memo_return_state.raised = True
memo.memo_return_state.value = e
self.bind_key_lifetime(raw_key, key)
return self.finalize_memo(memo=memo, key=key)
return call
return get_call
async def insert(self, *args, **kwargs) -> Any:
return await self.get_behavior(insert=True, update=False)(fn=self.fn)(*args, **kwargs)
def update(self, *args, **kwargs) -> Callable:
async def to(value: Any) -> Any:
async def fn(*_args, **_kwargs) -> Any:
return value
return await self.get_behavior(insert=False, update=True)(fn=fn)(*args, **kwargs)
return to
def upsert(self, *args, **kwargs) -> Callable:
async def to(value: Any) -> Any:
async def fn(*_args, **_kwargs) -> Any:
return value
return await self.get_behavior(insert=True, update=True)(fn=fn)(*args, **kwargs)
return to
async def remove(self, *args, **kwargs) -> None:
raw_key = await self.get_raw_key(*args, **kwargs)
key = self.get_key(raw_key)
self.reset_key(key)
def get_decorator(self) -> Callable:
async def decorator(*args, **kwargs) -> Any:
return await self.insert(*args, **kwargs)
decorator.memoize = self
return decorator
@staticmethod
def make_memo(t0: Optional[float]) -> _AsyncMemo:
return _AsyncMemo(t0=t0)
@dataclass(frozen=True)
class _SyncMemoize(_MemoizeBase):
_sync_lock: SyncLock = field(init=False, default_factory=lambda: SyncLock())
def get_raw_key(self, *args, **kwargs) -> Tuple[Hashable, ...]:
if self.keygen is None:
raw_key = self.default_keygen(*args, **kwargs)
else:
raw_key = self.keygen(**self.get_args_as_kwargs(*args, **kwargs))
if not isinstance(raw_key, tuple):
raw_key = [raw_key]
raw_key = tuple(raw_key)
return raw_key
def get_behavior(self, *, insert: bool, update: bool) -> Callable:
def get_call(*, fn: Callable) -> Callable:
@wraps(self.fn)
def call(*args, **kwargs) -> Any:
raw_key = self.get_raw_key(*args, **kwargs)
key = self.get_key(raw_key)
with self._sync_lock:
memo: _SyncMemo = self.get_memo(key, insert=insert)
if memo is None:
return fn(*args, **kwargs)
self.expire_one_memo()
with memo.sync_lock:
if (
(insert and not memo.memo_return_state.called) or
(update and memo.memo_return_state.value is not _MemoZeroValue)
):
memo.memo_return_state.called = True
try:
memo.memo_return_state.value = fn(*args, **kwargs)
except Exception as e:
memo.memo_return_state.raised = True
memo.memo_return_state.value = e
self.bind_key_lifetime(raw_key, key)
return self.finalize_memo(memo=memo, key=key)
return call
return get_call
def insert(self, *args, **kwargs) -> Any:
return self.get_behavior(insert=True, update=False)(fn=self.fn)(*args, **kwargs)
def update(self, *args, **kwargs) -> Callable:
def to(value: Any) -> Any:
def fn(*_args, **_kwargs) -> Any:
return value
return self.get_behavior(insert=False, update=True)(fn=fn)(*args, **kwargs)
return to
def upsert(self, *args, **kwargs) -> Callable:
def to(value: Any) -> Any:
def fn(*_args, **_kwargs) -> Any:
return value
return self.get_behavior(insert=True, update=True)(fn=fn)(*args, **kwargs)
return to
def remove(self, *args, **kwargs) -> None:
raw_key = self.get_raw_key(*args, **kwargs)
key = self.get_key(raw_key)
self.reset_key(key)
def get_decorator(self) -> Callable:
def decorator(*args, **kwargs) -> Any:
return self.insert(*args, **kwargs)
decorator.memoize = self
return decorator
@staticmethod
def make_memo(t0: Optional[float]) -> _SyncMemo:
return _SyncMemo(t0=t0)
def reset(self) -> None:
with self._sync_lock:
super().reset()
def reset_key(self, key: Union[int, str]) -> None:
with self._sync_lock:
super().reset_key(key)
class _Memoize:
"""Decorates a function call and caches return value for given inputs.
- If `db_path` is provided, memos will persist on disk and reloaded during initialization.
- If `duration` is provided, memos will only be valid for given `duration`.
- If `keygen` is provided, memo hash keys will be created with given `keygen`.
- If `pickler` is provided, persistent memos will (de)serialize using given `pickler`.
- If `size` is provided, LRU memo will be evicted if current count exceeds given `size`.
### Examples
- Body will run once for unique input `bar` and result is cached.
```python3
@memoize
def foo(bar) -> Any: ...
foo(1) # Function actually called. Result cached.
foo(1) # Function not called. Cached result returned.
foo(2) # Function actually called. Result cached.
```
- Same as above, but async.
```python3
@memoize
async def foo(bar) -> Any: ...
# Concurrent calls from the same event loop are safe. Only one call is generated. The
# other nine calls in this example wait for the result.
await asyncio.gather(*[foo(1) for _ in range(10)])
```
- Classes may be memoized.
```python3
@memoize
Class Foo:
def init(self, _): ...
Foo(1) # Instance is actually created.
Foo(1) # Instance not created. Cached instance returned.
Foo(2) # Instance is actually created.
```
- Calls `foo(1)`, `foo(bar=1)`, and `foo(1, baz='baz')` are equivalent and only cached once.
```python3
@memoize
def foo(bar, baz='baz'): ...
```
- Only 2 items are cached. Acts as an LRU.
```python3
@memoize(size=2)
def foo(bar) -> Any: ...
foo(1) # LRU cache order [foo(1)]
foo(2) # LRU cache order [foo(1), foo(2)]
foo(1) # LRU cache order [foo(2), foo(1)]
foo(3) # LRU cache order [foo(1), foo(3)], foo(2) is evicted to keep cache size at 2
```
- Items are evicted after 1 minute.
```python3
@memoize(duration=datetime.timedelta(minutes=1))
def foo(bar) -> Any: ...
foo(1) # Function actually called. Result cached.
foo(1) # Function not called. Cached result returned.
sleep(61)
foo(1) # Function actually called. Cached result was too old.
```
- Memoize can be explicitly reset through the function's `.memoize` attribute
```python3
@memoize
def foo(bar) -> Any: ...
foo(1) # Function actually called. Result cached.
foo(1) # Function not called. Cached result returned.
foo.memoize.reset()
foo(1) # Function actually called. Cache was emptied.
```
- Current cache length can be accessed through the function's `.memoize` attribute
```python3
@memoize
def foo(bar) -> Any: ...
foo(1)
foo(2)
len(foo.memoize) # returns 2
```
- Alternate memo hash function can be specified. The inputs must match the function's.
```python3
Class Foo:
@memoize(keygen=lambda self, a, b, c: (a, b, c)) # Omit 'self' from hash key.
def bar(self, a, b, c) -> Any: ...
a, b = Foo(), Foo()
# Hash key will be (a, b, c)
a.bar(1, 2, 3) # LRU cache order [Foo.bar(a, 1, 2, 3)]
# Hash key will again be (a, b, c)
# Be aware, in this example the returned result comes from a.bar(...), not b.bar(...).
b.bar(1, 2, 3) # Function not called. Cached result returned.
```
- If part of the returned key from keygen is awaitable, it will be awaited.
```python3
async def awaitable_key_part() -> Hashable: ...
@memoize(keygen=lambda bar: (bar, awaitable_key_part()))
async def foo(bar) -> Any: ...
```
- If the memoized function is async and any part of the key is awaitable, it is awaited.
```python3
async def morph_a(a: int) -> int: ...
@memoize(keygen=lambda a, b, c: (morph_a(a), b, c))
def foo(a, b, c) -> Any: ...
```
- Properties can be memoized.
```python3
Class Foo:
@property
@memoize
def bar(self) -> Any: ...
a = Foo()
a.bar # Function actually called. Result cached.
a.bar # Function not called. Cached result returned.
b = Foo() # Memoize uses 'self' parameter in hash. 'b' does not share returns with 'a'
b.bar # Function actually called. Result cached.
b.bar # Function not called. Cached result returned.
```
- Be careful with eviction on instance methods. Memoize is not instance-specific.
```python3
Class Foo:
@memoize(size=1)
def bar(self, baz) -> Any: ...
a, b = Foo(), Foo()
a.bar(1) # LRU cache order [Foo.bar(a, 1)]
b.bar(1) # LRU cache order [Foo.bar(b, 1)], Foo.bar(a, 1) is evicted
a.bar(1) # Foo.bar(a, 1) is actually called and cached again.
```
- Values can persist to disk and be reloaded when memoize is initialized again.
```python3
@memoize(db_path=Path.home() / '.memoize')
def foo(a) -> Any: ...
foo(1) # Function actually called. Result cached.
# Process is restarted. Upon restart, the state of the memoize decorator is reloaded.
foo(1) # Function not called. Cached result returned.
```
- If not applied to a function, calling the decorator returns a partial application.
```python3
memoize_db = memoize(db_path=Path.home() / '.memoize')
@memoize_db(size=1)
def foo(a) -> Any: ...
@memoize_db(duration=datetime.timedelta(hours=1))
def bar(b) -> Any: ...
```
- Comparison equality does not affect memoize. Only hash equality matters.
```python3
# Inherits object.__hash__
class Foo:
# Don't be fooled. memoize only cares about the hash.
def __eq__(self, other: Foo) -> bool:
return True
@memoize
def bar(foo: Foo) -> Any: ...
foo0, foo1 = Foo(), Foo()
assert foo0 == foo1
bar(foo0) # Function called. Result cached.
bar(foo1) # Function called again, despite equality, due to different hash.
```
### A warning about arguments that inherit `object.__hash__`:
It doesn't make sense to keep a memo if it's impossible to generate the same input again. Inputs
that inherit the default `object.__hash__` are unique based on their id, and thus, their
location in memory. If such inputs are garbage-collected, they are gone forever. For that
reason, when those inputs are garbage collected, `memoize` will drop memos created using those
inputs.
- Memo lifetime is bound to the lifetime of any arguments that inherit `object.__hash__`.
```python3
# Inherits object.__hash__
class Foo:
...
@memoize
def bar(foo: Foo) -> Any: ...
bar(Foo()) # Memo is immediately deleted since Foo() is garbage collected.
foo = Foo()
bar(foo) # Memo isn't deleted until foo is deleted.
del foo # Memo is deleted at the same time as foo.
```
- Types that have specific, consistent hash functions (int, str, etc.) won't cause problems.
```python3
@memoize
def foo(a: int, b: str, c: Tuple[int, ...], d: range) -> Any: ...
foo(1, 'bar', (1, 2, 3), range(42)) # Function called. Result cached.
foo(1, 'bar', (1, 2, 3), range(42)) # Function not called. Cached result returned.
```
- Classmethods rely on classes, which inherit from `object.__hash__`. However, classes are
almost never garbage collected until a process exits so memoize will work as expected.
```python3
class Foo:
@classmethod
@memoize
def bar(cls) -> Any: ...
foo = Foo()
foo.bar() # Function called. Result cached.
foo.bar() # Function not called. Cached result returned.
del foo # Memo not cleared since lifetime is bound to class Foo.
foo = Foo()
foo.bar() # Function not called. Cached result returned.
foo.bar() # Function not called. Cached result returned.
```
- Long-lasting object instances that inherit from `object.__hash__`.
```python3
class Foo:
@memoize
def bar(self) -> Any: ...
foo = Foo()
foo.bar() # Function called. Result cached.
# foo instance is kept around somewhere and used later.
foo.bar() # Function not called. Cached result returned.
```
- Custom pickler may be specified for unpickleable return types.
```python3
import dill
@memoize(db_path='~/.memoize`, pickler=dill)
def foo() -> Callable[[], None]:
return lambda: None
```
"""
_all_decorators = WeakSet()
@staticmethod
def __call__(
_decoratee: Optional[Decoratee] = None,
*,
db_path: Optional[Path] = None,
duration: Optional[Union[int, float, timedelta]] = None,
keygen: Optional[Keygen] = None,
pickler: Optional[Pickler] = None,
size: Optional[int] = None,
) -> Union[Decoratee]:
if _decoratee is None:
return partial(memoize, db_path=db_path, duration=duration, keygen=keygen, pickler=pickler, size=size)
if inspect.isclass(_decoratee):
assert db_path is None, 'Class memoization not allowed with db.'
class WrappedMeta(type(_decoratee)):
# noinspection PyMethodParameters
@memoize(duration=duration, size=size)
def __call__(cls, *args, **kwargs):
return super().__call__(*args, **kwargs)
class Wrapped(_decoratee, metaclass=WrappedMeta):
pass
return type(_decoratee.__name__, (Wrapped,), {'__doc__': _decoratee.__doc__})
db = connect(f'{db_path}') if db_path is not None else None
duration = timedelta(seconds=duration) if isinstance(duration, (int, float)) else duration
assert (duration is None) or (duration.total_seconds() > 0)
pickler = pickle if pickler is None else pickler
assert (size is None) or (size > 0)
fn = _decoratee
default_kwargs: Mapping[str, Any] = {
k: v.default for k, v in inspect.signature(fn).parameters.items()
}
if inspect.iscoroutinefunction(_decoratee):
decorator_cls = _AsyncMemoize
else:
decorator_cls = _SyncMemoize
# noinspection PyArgumentList
decorator = decorator_cls(
db=db,
default_kwargs=default_kwargs,
duration=duration,
fn=fn,
keygen=keygen,
pickler=pickler,
size=size,
).get_decorator()
_Memoize._all_decorators.add(decorator)
return wraps(_decoratee)(decorator)
@staticmethod
def reset_all() -> None:
for decorator in _Memoize._all_decorators:
decorator.memoize.reset()
memoize = _Memoize()
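# Minimal usage sketch (illustrative only; the import path and the argument
# values are assumptions, not defaults of this module):
#
#   from atools import memoize
#
#   @memoize(size=256, duration=60)  # keep at most 256 memos, each for 60 s
#   def slow_lookup(key: str) -> str:
#       ...
#
#   memoize.reset_all()  # drop every memo created through this decorator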
| 33.545455 | 114 | 0.564393 | [
"MIT"
] | cevans87/atools | atools/_memoize_decorator.py | 25,461 | Python |
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam identifiers which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# increment the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
Parameters
----------
filename: str
name of the configuration file
Returns
-------
keylist: list
list of ConfKey objects
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all the predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case that the keyword just exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has a value
# other than the allowed ones
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the base filename of the
# simulation configuration file
return os.path.basename(new_name)
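# Usage sketch (illustrative only; the configuration file name is an
# assumption):
#
#   conf = ConfigFile('G141.F140W.V4.31.conf')
#   conf.check_files()                      # verify referenced files exist
#   sens_file = conf['A']['SENSITIVITY_']   # sensitivity file of beam A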
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check whether an ident and a keyword list are given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the roots of all global
# beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initialize and fill a list of the optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# increment the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# increment the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the roots of all
# beam keywords indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
copied and removed from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# increment the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the input list
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynomial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
Parameters
----------
inkey: ConfListKey
the keyword name
Returns
-------
int
1 if the key length is consistent, 0 otherwise
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers subclasses the possibility
to place a meaningful string header before the
actual data string.
Parameters
----------
description: str
description of the object content
Returns
-------
str
the header string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: {0:s} has wrong length!'.format(e.keyword))
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a lightweight yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. In addition to the plain keyword data, the
keyword list class stores the keyvalues transformed to a
list of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
| 29.098618 | 79 | 0.548343 | [
"BSD-3-Clause"
] | sosey/pyaxe | pyaxe/axesrc/configfile.py | 46,325 | Python |
# from imports import *
# import random
# class Docs(commands.Cog):
# def __init__(self, bot):
# self.bot = bot
# self.bot.loop.create_task(self.__ainit__())
# async def __ainit__(self):
# await self.bot.wait_until_ready()
# self.scraper = AsyncScraper(session = self.bot.session)
# async def rtfm_lookup(self, program = None, *, args = None):
# rtfm_dictionary = {
# "fusion.py": "https://fusion.senarc.org/en/master/",
# "development" : "https://fusion.senarc.org/en/development/"
# }
# if not args:
# return rtfm_dictionary.get(program)
# else:
# url = rtfm_dictionary.get(program)
# results = await self.scraper.search(args, page=url)
# if not results:
# return f"Could not find anything with {args}."
# else:
# return results
# def reference(self, message):
# reference = message.reference
# if reference and isinstance(reference.resolved, discord.Message):
# return reference.resolved.to_reference()
# return None
# async def rtfm_send(self, ctx, results):
# if isinstance(results, str):
# await ctx.send(results, allowed_mentions = discord.AllowedMentions.none())
# else:
# embed = discord.Embed(color = random.randint(0, 16777215))
# results = results[:10]
# embed.description = "\n".join(f"[`{result}`]({value})" for result, value in results)
# reference = self.reference(ctx.message)
# await ctx.send(embed=embed, reference = reference)
# @commands.group(slash_interaction=True, aliases=["rtd", "rtfs"], brief="Search for attributes from docs.")
# async def rtfm(self, ctx, *, args = None):
# await ctx.trigger_typing()
# results = await self.rtfm_lookup(program = "fusion.py", args = args)
# await self.rtfm_send(ctx, results)
# @rtfm.command(slash_interaction=True, brief = "a command using doc_search to look up at development's docs")
# async def development(self, ctx, *, args = None):
# await ctx.trigger_typing()
# results = await self.rtfm_lookup(program="development", args = args)
# await self.rtfm_send(ctx, results)
# def setup(bot):
# bot.add_cog(Docs(bot))
| 30.082192 | 112 | 0.651184 | [
"MIT"
] | BenitzCoding/Utility-Bot | cogs/docs.py | 2,196 | Python |
import typer
from typing import Optional
from article_ripper import get_document, html_to_md
app = typer.Typer()
@app.command()
def fun(url: str, out: Optional[str] = None) -> None:
doc = get_document(url)
doc_summary = doc.summary()
if out is None:
print(doc_summary)
else:
with open(out, "w") as f:
f.write(doc_summary)
def run() -> None:
app()
if __name__ == "__main__":
run()
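# Usage sketch (illustrative; the module path is an assumption about how the
# package is installed):
#
#   python -m article_ripper.cli https://example.com/post --out post.html
#
# prints the readability-extracted HTML, or writes it to post.html when
# --out is given.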
| 17.56 | 53 | 0.624146 | [
"MIT"
] | nozwock/article-ripper | src/article_ripper/cli.py | 439 | Python |
from django.conf.urls import url, include
import binder.router # noqa
import binder.websocket # noqa
import binder.views # noqa
import binder.history # noqa
import binder.models # noqa
import binder.plugins.token_auth.views # noqa
from binder.plugins.views.multi_request import multi_request_view
from .views import animal, caretaker, costume, custom, zoo, contact_person, gate # noqa
router = binder.router.Router().register(binder.views.ModelView)
room_controller = binder.websocket.RoomController().register(binder.views.ModelView)
urlpatterns = [
url(r'^custom/route', custom.custom, name='custom'),
# url(r'^user/$', custom.user, name='user'),
url(r'^multi/$', multi_request_view, name='multi_request'),
url(r'^', include(router.urls)),
url(r'^', binder.views.api_catchall, name='catchall'),
]
# FIXME: Hmm, this is a bit hackish. Especially here. But where else?
binder.models.install_history_signal_handlers(binder.models.BinderModel)
| 38.04 | 87 | 0.768665 | [
"MIT"
] | BBooijLiewes/django-binder | tests/testapp/urls.py | 951 | Python |
from .brand import BrandDataset, Brand
from .vehicle_id import VehicleIDDataset
from .comp_cars import CompCarsDataset
# from .veri import VeriDataset
from .box_cars import BoxCars116kDataset
# from .vric import VRICDataset
from .cars196 import Cars196Dataset | 37 | 40 | 0.84556 | [
"MIT"
] | piyengar/vehicle-predictor | experiments/brand/dataset/__init__.py | 259 | Python |
"""Certbot constants."""
import os
import logging
from acme import challenges
SETUPTOOLS_PLUGINS_ENTRY_POINT = "certbot.plugins"
"""Setuptools entry point group name for plugins."""
OLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Plugins Setuptools entry point before rename."""
CLI_DEFAULTS = dict(
config_files=[
"/etc/letsencrypt/cli.ini",
# http://freedesktop.org/wiki/Software/xdg-user-dirs/
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
dry_run=False,
verbose_count=-int(logging.INFO / 10),
server="https://acme-v01.api.letsencrypt.org/directory",
rsa_key_size=2048,
rollback_checkpoints=1,
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
no_verify_ssl=False,
http01_port=challenges.HTTP01Response.PORT,
http01_address="",
tls_sni_01_port=challenges.TLSSNI01Response.PORT,
tls_sni_01_address="",
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
strict_permissions=False,
debug_challenges=False,
)
STAGING_URI = "https://acme-staging.api.letsencrypt.org/directory"
# The set of reasons for revoking a certificate is defined in RFC 5280 in
# section 5.3.1. The reasons that users are allowed to submit are restricted to
# those accepted by the ACME server implementation. They are listed in
# `letsencrypt.boulder.revocation.reasons.go`.
REVOCATION_REASONS = {
"unspecified": 0,
"keycompromise": 1,
"affiliationchanged": 3,
"superseded": 4,
"cessationofoperation": 5}
"""Defaults for CLI flags and `.IConfig` attributes."""
QUIET_LOGGING_LEVEL = logging.WARNING
"""Logging level to use in quiet mode."""
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
# This value should ensure that there is never a deployment delay by
# default.
deploy_before_expiry="99 years",
)
"""Defaults for renewer script."""
ENHANCEMENTS = ["redirect", "http-header", "ocsp-stapling", "spdy"]
"""List of possible :class:`certbot.interfaces.IInstaller`
enhancements.
List of expected options parameters:
- redirect: None
- http-header: TODO
- ocsp-stapling: certificate chain file path
- spdy: TODO
"""
ARCHIVE_DIR = "archive"
"""Archive directory, relative to `IConfig.config_dir`."""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``.IConfig.config_dir`` et al."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
BACKUP_DIR = "backups"
"""Directory (relative to `IConfig.work_dir`) where backups are kept."""
CSR_DIR = "csr"
"""See `.IConfig.csr_dir`."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`IConfig.work_dir`)."""
KEY_DIR = "keys"
"""Directory (relative to `IConfig.config_dir`) where keys are saved."""
LIVE_DIR = "live"
"""Live directory, relative to `IConfig.config_dir`."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory (relative to `IConfig.work_dir`)."""
RENEWAL_CONFIGS_DIR = "renewal"
"""Renewal configs directory, relative to `IConfig.config_dir`."""
FORCE_INTERACTIVE_FLAG = "--force-interactive"
"""Flag to disable TTY checking in IDisplay."""
EFF_SUBSCRIBE_URI = "https://supporters.eff.org/subscribe/certbot"
"""EFF URI used to submit the e-mail address of users who opt-in."""
| 29.594828 | 79 | 0.718905 | [
"Apache-2.0"
] | Randagio13/certbot | certbot/constants.py | 3,433 | Python |
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
from pathlib import Path
from subprocess import check_output
from time import sleep
import pytest
import yaml
from selenium import webdriver
from selenium.common.exceptions import JavascriptException, WebDriverException
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test):
my_charm = await ops_test.build_charm(".")
image_path = METADATA["resources"]["oci-image"]["upstream-source"]
await ops_test.model.deploy(my_charm, resources={"oci-image": image_path})
charm_name = METADATA["name"]
await ops_test.model.wait_for_idle(
[charm_name],
raise_on_blocked=True,
raise_on_error=True,
timeout=300,
)
assert ops_test.model.applications[charm_name].units[0].workload_status == "waiting"
assert (
ops_test.model.applications[charm_name].units[0].workload_status_message
== "Waiting for kubeflow-profiles relation data"
)
@pytest.mark.abort_on_fail
async def test_add_profile_relation(ops_test):
charm_name = METADATA["name"]
# TODO: Point kubeflow-profiles to latest/stable when Rev 54 or higher is promoted
await ops_test.model.deploy("kubeflow-profiles", channel="latest/edge")
await ops_test.model.add_relation("kubeflow-profiles", charm_name)
await ops_test.model.wait_for_idle(
["kubeflow-profiles", charm_name],
status="active",
raise_on_blocked=True,
raise_on_error=True,
timeout=300,
)
async def test_status(ops_test):
charm_name = METADATA["name"]
assert ops_test.model.applications[charm_name].units[0].workload_status == "active"
def fix_queryselector(elems):
"""Workaround for web components breaking querySelector.
Because someone thought it was a good idea to just yeet the moral equivalent
of iframes everywhere over a single page 🤦
Shadow DOM was a terrible idea and everyone involved should feel professionally
ashamed of themselves. Every problem it tried to solved could and should have
been solved in better ways that don't break the DOM.
"""
selectors = '").shadowRoot.querySelector("'.join(elems)
return 'return document.querySelector("' + selectors + '")'
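# Example (illustrative):
#   fix_queryselector(["main-page", "dashboard-view"])
# builds
#   'return document.querySelector("main-page").shadowRoot.querySelector("dashboard-view")'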
@pytest.fixture()
async def driver(request, ops_test):
status = yaml.safe_load(
check_output(
["juju", "status", "-m", ops_test.model_full_name, "--format=yaml"]
)
)
endpoint = status["applications"]["kubeflow-dashboard"]["address"]
application = ops_test.model.applications["kubeflow-dashboard"]
config = await application.get_config()
port = config["port"]["value"]
url = f"http://{endpoint}.nip.io:{port}/"
options = Options()
options.headless = True
with webdriver.Firefox(options=options) as driver:
wait = WebDriverWait(driver, 180, 1, (JavascriptException, StopIteration))
for _ in range(60):
try:
driver.get(url)
break
except WebDriverException:
sleep(5)
else:
driver.get(url)
yield driver, wait, url
driver.get_screenshot_as_file(f"/tmp/selenium-{request.node.name}.png")
def test_links(driver):
driver, wait, url = driver
# Ensure that sidebar links are set up properly
links = [
"/jupyter/",
"/katib/",
"/pipeline/#/experiments",
"/pipeline/#/pipelines",
"/pipeline/#/runs",
"/pipeline/#/recurringruns",
# Removed temporarily until https://warthogs.atlassian.net/browse/KF-175 is fixed
# "/pipeline/#/artifacts",
# "/pipeline/#/executions",
"/volumes/",
"/tensorboards/",
]
for link in links:
print("Looking for link: %s" % link)
script = fix_queryselector(["main-page", f"iframe-link[href='{link}']"])
wait.until(lambda x: x.execute_script(script))
# Ensure that quick links are set up properly
links = [
"/pipeline/",
"/pipeline/#/runs",
"/jupyter/new?namespace=kubeflow",
"/katib/",
]
for link in links:
print("Looking for link: %s" % link)
script = fix_queryselector(
[
"main-page",
"dashboard-view",
f"iframe-link[href='{link}']",
]
)
wait.until(lambda x: x.execute_script(script))
# Ensure that doc links are set up properly
links = [
"https://charmed-kubeflow.io/docs/kubeflow-basics",
"https://microk8s.io/docs/addon-kubeflow",
"https://www.kubeflow.org/docs/started/requirements/",
]
for link in links:
print("Looking for link: %s" % link)
script = fix_queryselector(
[
"main-page",
"dashboard-view",
f"a[href='{link}']",
]
)
wait.until(lambda x: x.execute_script(script))
| 31.357576 | 89 | 0.63993 | [
"Apache-2.0"
] | VariableDeclared/kubeflow-dashboard-operator | tests/integration/test_charm.py | 5,177 | Python |
# Copyright (c) 2017 Sofia Ira Ktena <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of filetypes (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. lasso, partial correlation, correlation
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
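# Usage sketch (illustrative only; assumes the preprocessed ABIDE data and the
# derived connectivity .mat files already exist under root_folder):
#
#   subject_IDs = get_ids(num_subjects=50)
#   timeseries = get_timeseries(subject_IDs, 'ho')
#   group_connectivity(timeseries, subject_IDs, 'ho', kind='correlation')
#   features = get_net_vectors(subject_IDs, 'correlation', atlas_name='ho')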
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords | 32.241379 | 115 | 0.648217 | [
"MIT"
] | HoganZhang/gcn_metric_learning | lib/abide_utils.py | 11,220 | Python |
from typing import List
import numpy as np
def mask_nan(arrays: List[np.ndarray]) -> List[np.ndarray]:
"""
Drop indices from equal-sized arrays if the element at that index is NaN in
any of the input arrays.
Parameters
----------
arrays : List[np.ndarray]
list of ndarrays containing NaNs, to be masked
Returns
-------
List[np.ndarray]
masked arrays (free of NaNs)
Notes
-----
This function find the indices where one or more elements is NaN in one or
more of the input arrays, then drops those indices from all arrays.
For example:
>> a = np.array([0, 1, np.nan, 3])
>> b = np.array([np.nan, 5, np.nan, 7])
>> c = np.array([8, 9, 10, 11])
>> mask_nan([a, b, c])
[array([ 1., 3.]), array([ 5., 7.]), array([ 9, 11])]
"""
n = arrays[0].size
assert all(a.size == n for a in arrays[1:])
mask = np.array([False] * n)
for arr in arrays:
mask = np.logical_or(mask, np.isnan(arr))
return [arr[np.where(~mask)[0]] for arr in arrays]
| 27.128205 | 79 | 0.581285 | [
"MIT"
] | jbburt/jburt | jburt/mask.py | 1,058 | Python |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure Python crypto-related routines for oauth2client.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates.
"""
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
    for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
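# Illustrative example (added; not in the upstream module): eight bits are packed
# per byte, most-significant bit first, so
#   _bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1]) == b'A'    # 0b01000001 == 65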
class RsaVerifier(object):
"""Verifies the signature on a message.
Args:
pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
"""
def __init__(self, pubkey):
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, key_pem, is_x509_cert):
"""Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
            ValueError: if the key_pem can't be parsed. In either case, the
                        error will begin with 'No PEM start marker'. If
                        ``is_x509_cert`` is True, the parser fails to find the
                        "-----BEGIN CERTIFICATE-----" marker; otherwise it
                        fails to find "-----BEGIN RSA PUBLIC KEY-----".
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
else:
pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
return cls(pubkey)
class RsaSigner(object):
"""Signs messages with a private key.
Args:
pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
"""
def __init__(self, pkey):
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return rsa.pkcs1.sign(message, self._key, 'SHA-256')
@classmethod
def from_string(cls, key, password='notasecret'):
"""Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
            six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
format='DER')
elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
format='DER')
else:
raise ValueError('No key could be detected.')
return cls(pkey)
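# Illustrative usage sketch (added; not part of the upstream oauth2client module).
# It assumes the third-party ``rsa`` package is importable, which this module
# already requires; the key size and message are arbitrary choices.
def _demo_sign_and_verify():
    pub, priv = rsa.newkeys(1024)
    signer = RsaSigner.from_string(priv.save_pkcs1())            # PKCS#1 PEM
    verifier = RsaVerifier.from_string(pub.save_pkcs1(),         # "RSA PUBLIC KEY" PEM
                                       is_x509_cert=False)
    signature = signer.sign(b'some message')
    return verifier.verify(b'some message', signature)           # True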
| 34.432432 | 79 | 0.609733 | [
"Apache-2.0"
] | giangpvit/googledrivepythonsample | test/lib/oauth2client/_pure_python_crypt.py | 6,370 | Python |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Coupon, obj[2]: Education, obj[3]: Occupation, obj[4]: Restaurant20to50, obj[5]: Distance
# {"feature": "Passanger", "instances": 34, "metric_value": 0.9774, "depth": 1}
if obj[0]<=1:
# {"feature": "Restaurant20to50", "instances": 22, "metric_value": 0.7732, "depth": 2}
if obj[4]<=3.0:
# {"feature": "Education", "instances": 21, "metric_value": 0.7025, "depth": 3}
if obj[2]<=2:
# {"feature": "Occupation", "instances": 16, "metric_value": 0.8113, "depth": 4}
if obj[3]<=10:
# {"feature": "Coupon", "instances": 13, "metric_value": 0.6194, "depth": 5}
if obj[1]>2:
# {"feature": "Distance", "instances": 7, "metric_value": 0.8631, "depth": 6}
if obj[5]<=2:
return 'False'
elif obj[5]>2:
return 'False'
else: return 'False'
elif obj[1]<=2:
return 'False'
else: return 'False'
elif obj[3]>10:
# {"feature": "Coupon", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[1]<=2:
return 'True'
elif obj[1]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[2]>2:
return 'False'
else: return 'False'
elif obj[4]>3.0:
return 'True'
else: return 'True'
elif obj[0]>1:
# {"feature": "Coupon", "instances": 12, "metric_value": 0.8113, "depth": 2}
if obj[1]>0:
# {"feature": "Occupation", "instances": 11, "metric_value": 0.684, "depth": 3}
if obj[3]<=20:
# {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.469, "depth": 4}
if obj[4]>1.0:
return 'True'
elif obj[4]<=1.0:
# {"feature": "Education", "instances": 4, "metric_value": 0.8113, "depth": 5}
if obj[2]<=1:
return 'True'
elif obj[2]>1:
# {"feature": "Distance", "instances": 2, "metric_value": 1.0, "depth": 6}
if obj[5]<=2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>20:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'True'
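# Illustrative call (added): the list positions follow the comment on the first line
# (Passanger, Coupon, Education, Occupation, Restaurant20to50, Distance); the concrete
# values below are made-up ordinal encodings from the coupon dataset.
#   findDecision([1, 2, 1, 5, 2.0, 1])   # -> 'False'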
| 34.508197 | 140 | 0.564371 | [
"MIT"
] | apcarrik/kaggle | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/6_features/numtrees_30/rule_0.py | 2,105 | Python |
import AnalysisModule as Ass
import GraphFunctions as Gfs
import yfinance as yf
import DatabaseStocks as Ds
listOfStocksToAnalyze = Ds.get_lists()
macdproposedbuylist = []
macdproposedselllist = []
proposedbuylist = []
proposedselllist = []
for stock in listOfStocksToAnalyze:
# print(stock)
StockData = yf.Ticker(stock).history(period="1y")
if Ass.macd_potential_buy(StockData) and Ass.is_stock_rising(StockData):
macdproposedbuylist.append(stock)
print("MACD Something you might wanna buy is " + stock)
continue
if Ass.macd_potential_sell(StockData) and Ass.is_stock_falling(StockData):
macdproposedselllist.append(stock)
print("MACD Something you might wanna sell is " + stock)
if Ass.sma_potential_buy(StockData):
proposedbuylist.append(stock)
print("Something you might wanna buy is " + stock)
continue
if Ass.sma_potential_sell(StockData):
proposedselllist.append(stock)
print("Something you might wanna sell is " + stock)
print(macdproposedselllist)
print(macdproposedbuylist)
for stock in macdproposedbuylist:
StockData = yf.Ticker(stock).history(period="1y")
Gfs.draw_macd_buy(StockData, "BUY " + stock)
for stock in macdproposedselllist:
StockData = yf.Ticker(stock).history(period="1y")
Gfs.draw_macd_sell(StockData, "SELL " + stock)
for stock in proposedbuylist:
StockData = yf.Ticker(stock).history(period="1y")
Gfs.draw_macd_buy(StockData, "BUY MA " + stock)
for stock in proposedselllist:
StockData = yf.Ticker(stock).history(period="1y")
Gfs.draw_macd_sell(StockData, "SELL MA " + stock)
| 31.75 | 78 | 0.724409 | [
"MIT"
] | VaseSimion/Finance | workfile.py | 1,651 | Python |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yolov2_ros.srv import *
import rospy
from copy import deepcopy
from core import YOLO
from vision_msgs.msg import Detection2DArray, Detection2D, BoundingBox2D, ObjectHypothesisWithPose
from geometry_msgs.msg import PoseWithCovariance, Pose2D
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class YoloServer(object):
def __init__(self):
self.bridge = CvBridge()
self.n_gpu = rospy.get_param('~n_gpu', default=1)
self.backend = rospy.get_param('~backend', default='full_yolo') # Either 'tiny_yolo', full_yolo, 'mobile_net, 'squeeze_net', or 'inception3'
self.backend_path = rospy.get_param('~weights_path') # Weights directory
self.input_size = rospy.get_param('~input_size', default=416) # DO NOT change this. 416 is default for YOLO.
self.labels = rospy.get_param('~labels') # Eg: ['trafficcone', 'person', 'dog']
self.max_number_detections = rospy.get_param('~max_number_detections', default=5) # Max number of detections
self.anchors = rospy.get_param('~anchors', default=[0.57273, 0.677385, 1.87446, # The anchors to use. Use the anchor generator and copy these into the config.
2.06253, 3.33843, 5.47434, 7.88282,
3.52778, 9.77052, 9.16828])
self.weights_path = rospy.get_param('~weights_path', default='../weights/full_yolo.h5') # Path to the weights.h5 file
self.weight_file = rospy.get_param('~weight_file')
self.yolo = YOLO(
n_gpu=self.n_gpu,
backend = self.backend,
backend_path=self.backend_path,
input_size = self.input_size,
labels = self.labels,
max_box_per_image = self.max_number_detections,
anchors = self.anchors
)
self.yolo.load_weights(self.weights_path + '/' + self.weight_file)
rospy.loginfo('YOLO detector ready...')
s = rospy.Service('yolo_detect', YoloDetect, self._handle_yolo_detect, buff_size=10000000)
s.spin()
def _handle_yolo_detect(self, req):
cv_image = None
detection_array = Detection2DArray()
detections = []
        boxes = []  # default to no detections so the loop below is a no-op if prediction fails
try:
cv_image = self.bridge.imgmsg_to_cv2(req.image, "bgr8")
except CvBridgeError as e:
rospy.logerr(e)
try:
boxes = self.yolo.predict(cv_image)
except SystemError:
pass
# rospy.loginfo('Found {} boxes'.format(len(boxes)))
for box in boxes:
detection = Detection2D()
results = []
bbox = BoundingBox2D()
center = Pose2D()
detection.header = Header()
detection.header.stamp = rospy.get_rostime()
# detection.source_img = deepcopy(req.image)
labels = box.get_all_labels()
for i in range(0,len(labels)):
object_hypothesis = ObjectHypothesisWithPose()
object_hypothesis.id = i
object_hypothesis.score = labels[i]
results.append(object_hypothesis)
detection.results = results
x, y = box.get_xy_center()
center.x = x
center.y = y
center.theta = 0.0
bbox.center = center
size_x, size_y = box.get_xy_extents()
bbox.size_x = size_x
bbox.size_y = size_y
detection.bbox = bbox
detections.append(detection)
detection_array.header = Header()
detection_array.header.stamp = rospy.get_rostime()
detection_array.detections = detections
return YoloDetectResponse(detection_array)
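# Illustrative client-side sketch (added; not part of this node). A caller would
# typically look something like the lines below -- the request/response field names
# depend on the YoloDetect.srv definition and are assumptions here:
#
#   rospy.wait_for_service('yolo_detect')
#   detect = rospy.ServiceProxy('yolo_detect', YoloDetect)
#   result = detect(image_msg)  # returns a YoloDetectResponse wrapping a Detection2DArray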
if __name__ == '__main__':
rospy.init_node('yolo_server')
try:
ys = YoloServer()
except rospy.ROSInterruptException:
pass | 37.853448 | 175 | 0.59918 | [
"BSD-3-Clause"
] | diggerdata/yolov2_ros | scripts/yolo_server.py | 4,391 | Python |
from . import IGGrid
def get():
return IGGrid.IGGrid()
| 10.166667 | 26 | 0.655738 | [
"MIT"
] | bertrandboudaud/imagegraph | nodes/IGGrid/__init__.py | 61 | Python |
from django.urls import path, include
from . import views
app_name = 'test_app'
urlpatterns = [
path('', views.index, name='index'),
] | 17.5 | 40 | 0.685714 | [
"MIT"
] | nosu23/docker-gunicorn-django-example | gunicorn/src/test_app/urls.py | 140 | Python |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import RitoTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to ritod, and store responses.
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, conn, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, conn, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, conn, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, conn, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(RitoTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes ritod to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# ritod's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))  # zero-pad to a full 64-char hash
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert(len(header_and_shortids.prefilled_txn) >= 1)
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert(entry.tx.wit.is_null())
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
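    # (Added note) Per BIP 152, the short ids verified above are derived as:
    #   k0, k1 = the first two little-endian 64-bit words of
    #            SHA256(serialized block header || nonce as 8-byte little-endian)
    #   shortid = SipHash-2-4(k0, k1, txid or wtxid) truncated to its lowest 6 bytes.
    # This is what get_siphash_keys() and calculate_shortid() from the test framework
    # are expected to implement; the helpers themselves are taken on trust here.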
# Test that ritod requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [
calculate_shortid(k0, k1, coinbase_hash) ]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_witness_blocktxn()
else:
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
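    # (Added note) build_block_with_transactions chains the transactions above: each
    # one spends the single OP_TRUE output of the previous one and leaves 1000
    # satoshis as fee, so the block carries a dependent chain of num_transactions txs.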
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
with_witness = (version==2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert("getblocktxn" in peer.last_message)
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
if with_witness:
msg_bt = msg_witness_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert(block.vtx[1].hash in node.getrawmempool())
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert(tx.hash in mempool)
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for ritod to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change were made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
if version==2:
msg = msg_witness_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version==2:
test_node.send_and_ping(msg_witness_block(block))
else:
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
# ritod will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert(tx.wit.is_null())
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert(found)
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
node.generate(144*3)
assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
assert(len(self.utxos))
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert(int(node.getbestblockhash(), 16) != block.sha256)  # compare by value, not identity
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
assert(len(self.utxos))
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert(int(node.getbestblockhash(), 16) != block.sha256)
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode()
self.segwit_node = TestNode()
self.old_node = TestNode() # version 1 peer <--> segwit node
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.old_node, services=NODE_NETWORK))
self.test_node.add_connection(connections[0])
self.segwit_node.add_connection(connections[1])
self.old_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
self.test_node.wait_for_verack()
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
self.log.info("Running tests, pre-segwit activation:")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
# End-to-end block relay tests
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
sync_blocks(self.nodes)
# Advance to segwit activation
self.log.info("Advancing to segwit activation")
self.activate_segwit(self.nodes[1])
self.log.info("Running tests, post-segwit activation...")
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests (unupgraded node)... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
self.log.info("Testing getblocktxn requests (unupgraded node)...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
# Need to manually sync node0 and node1, because post-segwit activation,
# node1 will not download blocks from node0.
self.log.info("Syncing nodes...")
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
| 47.108253 | 137 | 0.668912 | [
"MIT"
] | RitoProject/Ravencoin | test/functional/p2p_compactblocks.py | 43,952 | Python |
import logging
import io
from homeassistant.core import callback
from homeassistant.components.ais_dom import ais_global
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.components.camera import Camera
from homeassistant.helpers.event import async_track_state_change
from datetime import timedelta
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "qr_code"
SCAN_INTERVAL = timedelta(seconds=2000)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the QRCode image platform."""
add_entities([QRCodeCamera(hass, "remote_access", "remote_access")])
class QRCodeCamera(Camera):
"""Representation of an QRCode image."""
def __init__(self, hass, name, entity_ids):
"""Initialize the QRCode entity."""
super().__init__()
self._hass = hass
self._name = name
self._entities = entity_ids
self._image = io.BytesIO()
self._refresh_()
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def qr_state_listener(entity, old_state, new_state):
"""Handle device state changes."""
self._refresh_()
@callback
def qr_sensor_startup(event):
"""Update template on startup."""
async_track_state_change(self.hass, self._entities, qr_state_listener)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, qr_sensor_startup)
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def should_poll(self):
"""Update the recording state periodically."""
return True
@property
def state(self):
gate_id = ais_global.get_sercure_android_id_dom()
return "https://" + gate_id + ".paczka.pro"
def camera_image(self):
"""Process the image."""
return self._image.getvalue()
def turn_on(self):
"""Turn on camera."""
self._refresh_()
def turn_off(self):
pass
def enable_motion_detection(self):
pass
def disable_motion_detection(self):
pass
def _refresh_(self):
import pyqrcode
import png
gate_id = ais_global.get_sercure_android_id_dom()
_template = "https://" + gate_id + ".paczka.pro"
qr_code = pyqrcode.create(_template)
self._image.truncate(0)
self._image.seek(0)
qr_code.png(
self._image, scale=6, module_color=[0, 0, 0], background=[0xFF, 0xFF, 0xFF]
)
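# Illustrative standalone sketch (added; not part of the component): producing the
# same kind of QR PNG outside Home Assistant. It assumes ``pyqrcode`` and ``pypng``
# are installed, as the component above already requires; the text is arbitrary.
def _demo_qr_png_bytes(text="https://example.org"):
    import io
    import pyqrcode
    buf = io.BytesIO()
    pyqrcode.create(text).png(buf, scale=6)
    return buf.getvalue()  # raw PNG bytes, comparable to what camera_image() serves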
| 27.168421 | 87 | 0.655172 | [
"Apache-2.0"
] | DRubioBizcaino/AIS-home-assistant | homeassistant/components/ais_qrcode/camera.py | 2,581 | Python |
from datetime import date, timedelta
from app import create_app, new_functions as nf
from app.constants import weekend_answer
from app.models import User
from tg_bot import bot
def schedule_sender():
send_cnt, err_cnt = 0, 0
for user in User.query.filter_by(is_subscribed=True).all():
answer = user.create_answer_for_date(
for_date=date.today() + timedelta(days=1)
)
if answer == weekend_answer:
continue
try:
answer = "Расписание на завтра:\n\n" + answer
nf.tgbot_send_long_message(bot, answer, user.tg_id)
send_cnt += 1
except Exception as err:
print("---------------ERROR START---------------")
print(err)
print("USER ID:", user.id)
print("----------------ERROR END----------------")
err_cnt += 1
return send_cnt, err_cnt
if __name__ == '__main__':
with create_app().app_context():
print("OK: {0}; ERRORS: {1}".format(*schedule_sender()))
| 29.514286 | 64 | 0.573088 | [
"Apache-2.0"
] | EeOneDown/spbu4u | tg_schedule_sender.py | 1,051 | Python |
from collections import defaultdict
dd = defaultdict(list)
n, m = map(int, input().split())
groupA = []
for i in range(n):
a_element = input()
dd[a_element].append(str(i+1))
for _ in range(m):
b_element = input()
if b_element not in dd:
print(-1)
else:
print(" ".join(dd[b_element]))
| 18 | 38 | 0.601852 | [
"MIT"
] | rawat9/HackerRank | Python/Collections/DefaultDict Tutorial/solution.py | 324 | Python |
#!/usr/bin/env python
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(name="pipelinewise-target-s3-csv",
version="1.4.0",
description="Singer.io target for writing CSV files and upload to S3 - PipelineWise compatible",
long_description=long_description,
long_description_content_type='text/markdown',
author="TransferWise",
url='https://github.com/transferwise/pipelinewise-target-s3-csv',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only'
],
py_modules=["target_s3_csv"],
install_requires=[
'pipelinewise-singer-python==1.*',
'inflection==0.3.1',
'boto3==1.9.57',
],
extras_require={
"test": [
"nose==1.3.7",
"pylint==2.4.2"
]
},
entry_points="""
[console_scripts]
target-s3-csv=target_s3_csv:main
""",
packages=["target_s3_csv"],
      package_data={},
include_package_data=True,
)
| 28.769231 | 102 | 0.583779 | [
"Apache-2.0"
] | EasyPost/pipelinewise-target-s3-csv | setup.py | 1,122 | Python |
from datetime import datetime
import logging
import os
import subprocess
import sys
from argparse import Namespace
logging.getLogger("transformers").setLevel(logging.WARNING)
import click
import torch
from luke.utils.model_utils import ModelArchive
from zero.utils.experiment_logger import commet_logger_args, CometLogger, NullLogger
LOG_FORMAT = "[%(asctime)s] [%(levelname)s] %(message)s (%(funcName)s@%(filename)s:%(lineno)s)"
try:
import absl.logging
# https://github.com/tensorflow/tensorflow/issues/27045#issuecomment-519642980
logging.getLogger().removeHandler(absl.logging._absl_handler)
absl.logging._warn_preinit_stderr = False
except ImportError:
pass
logger = logging.getLogger(__name__)
@click.group()
@click.option(
"--output-dir", default="models", type=click.Path()
)
@click.option("--num-gpus", default=1)
@click.option("--experiment-logger", "--logger", type=click.Choice(["comet"]))
@click.option("--master-port", default=29500)
@click.option("--local-rank", "--local_rank", default=-1)
@click.option("--model-file", type=click.Path(exists=True))
@click.option("--device-id", type=int)
@commet_logger_args
@click.pass_context
def cli(ctx, **kwargs):
args = Namespace(**kwargs)
if args.local_rank == -1 and args.num_gpus > 1:
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = "127.0.0.1"
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(args.num_gpus)
processes = []
for args.local_rank in range(0, args.num_gpus):
current_env["RANK"] = str(args.local_rank)
current_env["LOCAL_RANK"] = str(args.local_rank)
cmd = [sys.executable, "-u", "-m", "examples.cli", "--local-rank={}".format(args.local_rank)]
cmd.extend(sys.argv[1:])
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
sys.exit(0)
else:
if args.local_rank not in (-1, 0):
logging.basicConfig(format=LOG_FORMAT, level=logging.WARNING)
else:
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Output dir: %s", args.output_dir)
# NOTE: ctx.obj is documented here: http://click.palletsprojects.com/en/7.x/api/#click.Context.obj
ctx.obj = dict(local_rank=args.local_rank, output_dir=args.output_dir)
if args.num_gpus == 0:
ctx.obj["device"] = torch.device("cpu")
elif args.local_rank == -1:
ctx.obj["device"] = torch.device("cuda:{}".format(args.device_id))
else:
torch.cuda.set_device(args.local_rank)
ctx.obj["device"] = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
experiment_logger = NullLogger()
if args.local_rank in (-1, 0) and args.experiment_logger == "comet":
experiment_logger = CometLogger(args)
experiment_logger.log_parameters({p.name: getattr(args, p.name) for p in cli.params})
ctx.obj["experiment"] = experiment_logger
if args.model_file:
model_archive = ModelArchive.load(args.model_file)
ctx.obj["tokenizer"] = model_archive.tokenizer
ctx.obj["entity_vocab"] = model_archive.entity_vocab
ctx.obj["bert_model_name"] = model_archive.bert_model_name
ctx.obj["model_config"] = model_archive.config
ctx.obj["max_mention_length"] = model_archive.max_mention_length
ctx.obj["model_weights"] = model_archive.state_dict
experiment_logger.log_parameter("model_file_name", os.path.basename(args.model_file))
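# Added note (hedged): the environment variables set in the multi-GPU branch above
# (MASTER_ADDR, MASTER_PORT, WORLD_SIZE, RANK, LOCAL_RANK) follow the standard
# torch.distributed "env://" rendezvous convention, so each spawned child process
# ends up calling torch.distributed.init_process_group(backend="nccl") with its own
# rank while sharing the same master address and world size.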
from zero.ner.main import cli as ner_cli
cli.add_command(ner_cli)
if __name__ == "__main__":
cli() | 35.307692 | 106 | 0.663762 | [
"MIT"
] | nguyenvanhoang7398/nndl2-project | zero/cli.py | 4,131 | Python |
count=0
n=4
for i in range(1,n+1):
for j in range(1,n+1):
if i==j:
continue
for k in range(1,n+1):
if i==k or j==k:
continue
count += 1
print(f"第{count}种方案:甲担任{i}号课代表,乙担任{j}号课代表,丙担任{k}号课代表")
print(f"一共{count}种方案") | 22.923077 | 66 | 0.466443 | [
"MIT"
] | royqh1979/programming_with_python | Chap02Loops/5-4.无重复组合.py | 364 | Python |